/* secmem.c - memory allocation from a secure heap
* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <unistd.h>
#if defined(HAVE_MLOCK) || defined(HAVE_MMAP)
#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#ifdef USE_CAPABILITIES
#include <sys/capability.h>
#endif
#ifdef HAVE_PLOCK
#include <sys/lock.h>
#endif
#endif
#include "types.h"
#include "memory.h"
#include "util.h"
#include "i18n.h"
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* It seems that Slackware 7.1 does not know about EPERM */
#if !defined(EPERM) && defined(ENOMEM)
#define EPERM ENOMEM
#endif
#define DEFAULT_POOLSIZE 16384
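/* Each allocation carved out of the pool is preceded by this header;
* unused blocks are chained together through the NEXT member of the
* union.  */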
typedef struct memblock_struct MEMBLOCK;
struct memblock_struct {
unsigned size;
union {
MEMBLOCK *next;
PROPERLY_ALIGNED_TYPE aligned;
} u;
};
static void *pool;
static volatile int pool_okay; /* may be checked in an atexit function */
#ifdef HAVE_MMAP
static volatile int pool_is_mmapped;
#endif
static size_t poolsize; /* allocated length */
static size_t poollen; /* used length */
static MEMBLOCK *unused_blocks;
static unsigned max_alloced;
static unsigned cur_alloced;
static unsigned max_blocks;
static unsigned cur_blocks;
static int disable_secmem;
static int show_warning;
static int no_warning;
static int suspend_warning;
static void
print_warn(void)
{
if (!no_warning)
{
log_info(_("WARNING: using insecure memory!\n"));
log_info(_("please see http://www.gnupg.org/faq.html"
" for more information\n"));
}
}
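/* Lock the N bytes of memory starting at P into RAM so that they can
* not be swapped to disk.  Failures are logged or merely flagged in
* SHOW_WARNING, depending on the platform and the kind of error.  */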
static void
lock_pool( void *p, size_t n )
{
#if defined(USE_CAPABILITIES) && defined(HAVE_MLOCK)
int err;
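/* Temporarily raise CAP_IPC_LOCK into the effective set so that
* mlock() is allowed, and drop back to permitted-only right after
* the call.  */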
cap_set_proc( cap_from_text("cap_ipc_lock+ep") );
err = mlock( p, n );
if( err && errno )
err = errno;
cap_set_proc( cap_from_text("cap_ipc_lock+p") );
if( err ) {
if( errno != EPERM
#ifdef EAGAIN /* OpenBSD returns this */
&& errno != EAGAIN
#endif
#ifdef ENOSYS /* Some SCOs return this (function not implemented) */
&& errno != ENOSYS
#endif
#ifdef ENOMEM /* Linux can return this */
&& errno != ENOMEM
#endif
)
log_error("can't lock memory: %s\n", strerror(err));
show_warning = 1;
}
#elif defined(HAVE_MLOCK)
uid_t uid;
int err;
uid = getuid();
#ifdef HAVE_BROKEN_MLOCK
/* Ick.  But at least we get secured memory.  We are about to lock
the entire data segment. */
#ifdef HAVE_PLOCK
# ifdef _AIX
/* The configure check for AIX reports a broken mlock, but plock has
the strange requirement that the stack limit somehow be set first.
The problem might show up as indeterministic program behaviour and
hanging processes, which can somehow be resolved once enough
processes are clogging up the memory.  To get this problem out
of the way we simply don't try to lock the memory at all.
*/
errno = EPERM;
err = errno;
# else /* !_AIX */
err = plock( DATLOCK );
if( err && errno )
err = errno;
# endif /*_AIX*/
#else /*!HAVE_PLOCK*/
if( uid ) {
errno = EPERM;
err = errno;
}
else {
err = mlock( p, n );
if( err && errno )
err = errno;
}
#endif /*!HAVE_PLOCK*/
#else
err = mlock( p, n );
if( err && errno )
err = errno;
#endif
if( uid && !geteuid() ) {
/* check that we really dropped the privs.
* Note: setuid(0) should always fail */
if( setuid( uid ) || getuid() != geteuid() || !setuid(0) )
log_fatal("failed to reset uid: %s\n", strerror(errno));
}
if( err ) {
if( errno != EPERM
#ifdef EAGAIN /* OpenBSD returns this */
&& errno != EAGAIN
#endif
#ifdef ENOSYS /* Some SCOs return this (function not implemented) */
&& errno != ENOSYS
#endif
#ifdef ENOMEM /* Linux can return this */
&& errno != ENOMEM
#endif
)
log_error("can't lock memory: %s\n", strerror(err));
show_warning = 1;
}
#elif defined ( __QNX__ )
/* QNX does not page at all, so the whole secure memory stuff does
* not make much sense. However it is still of use because it
* wipes out the memory on a free().
* Therefore it is sufficient to suppress the warning
*/
#elif defined (HAVE_DOSISH_SYSTEM) || defined (__CYGWIN__)
/* It does not make sense to print such a warning, given the fact that
* this whole Windows !@#$% and its user base are inherently insecure
*/
#elif defined (__riscos__)
/* no virtual memory on RISC OS, so no pages are swapped to disc,
* besides we don't have mmap, so we don't use it! ;-)
* But don't complain, as explained above.
*/
#else
log_info("Please note that you don't have secure memory on this system\n");
#endif
}
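/* Allocate the secure memory pool of N bytes, preferably as an
* anonymous (or /dev/zero backed) mmap region and otherwise with
* malloc, and try to lock it into RAM.  */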
static void
init_pool( size_t n)
{
size_t pgsize;
poolsize = n;
if( disable_secmem )
log_bug("secure memory is disabled");
#ifdef HAVE_GETPAGESIZE
pgsize = getpagesize();
#else
pgsize = 4096;
#endif
#ifdef HAVE_MMAP
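/* Round the pool size up to the next multiple of the page size so
* that mmap operates on whole pages.  */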
poolsize = (poolsize + pgsize -1 ) & ~(pgsize-1);
#ifdef MAP_ANONYMOUS
pool = mmap( 0, poolsize, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* map /dev/zero instead */
{ int fd;
fd = open("/dev/zero", O_RDWR);
if( fd == -1 ) {
log_error("can't open /dev/zero: %s\n", strerror(errno) );
pool = (void*)-1;
}
else {
pool = mmap( 0, poolsize, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
}
}
#endif
if( pool == (void*)-1 )
log_info("can't mmap pool of %u bytes: %s - using malloc\n",
(unsigned)poolsize, strerror(errno));
else {
pool_is_mmapped = 1;
pool_okay = 1;
}
#endif
if( !pool_okay ) {
pool = malloc( poolsize );
if( !pool )
log_fatal("can't allocate memory pool of %u bytes\n",
(unsigned)poolsize);
else
pool_okay = 1;
}
lock_pool( pool, poolsize );
poollen = 0;
}
/* concatenate unused blocks */
static void
compress_pool(void)
{
/* fixme: we really should do this */
}
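/* Set the warning flags: bit 0 disables the insecure memory warning
* entirely, bit 1 only suspends it.  A warning which became due while
* suspended is printed as soon as the suspension is lifted.  */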
void
secmem_set_flags( unsigned flags )
{
int was_susp = suspend_warning;
no_warning = flags & 1;
suspend_warning = flags & 2;
/* And now issue the warning if it is no longer suspended. */
if( was_susp && !suspend_warning && show_warning ) {
show_warning = 0;
print_warn();
}
}
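/* Return the current warning flags in the encoding used by
* secmem_set_flags().  */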
unsigned
secmem_get_flags(void)
{
unsigned flags;
flags = no_warning ? 1:0;
flags |= suspend_warning ? 2:0;
return flags;
}
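/* Initialize the secure memory pool.  Called with a size of 0 it does
* not allocate a pool but drops any special privileges (all
* capabilities or a setuid root id), because secure memory is not
* wanted; with a non-zero size it allocates and locks a pool of at
* least DEFAULT_POOLSIZE bytes.  An illustrative call sequence of an
* application would be:
*
*   secmem_init( 16384 );
*   p = secmem_malloc( 100 );
*   ...
*   secmem_free( p );
*   secmem_term();
*/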
/* Returns 1 if memory was locked, 0 if not. */
int
secmem_init( size_t n )
{
if( !n ) {
#ifndef __riscos__
#ifdef USE_CAPABILITIES
/* drop all capabilities */
cap_set_proc( cap_from_text("all-eip") );
#elif !defined(HAVE_DOSISH_SYSTEM)
uid_t uid;
disable_secmem=1;
uid = getuid();
if( uid != geteuid() ) {
if( setuid( uid ) || getuid() != geteuid() || !setuid(0) )
log_fatal("failed to drop setuid\n" );
}
#endif
#endif /* !__riscos__ */
}
else {
if( n < DEFAULT_POOLSIZE )
n = DEFAULT_POOLSIZE;
if( !pool_okay )
init_pool(n);
else
log_error("Oops, secure memory pool already initialized\n");
}
return !show_warning;
}
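/* Allocate SIZE bytes from the secure memory pool.  Returns NULL if
* the pool is exhausted; terminates the process if the pool has never
* been initialized.  */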
void *
secmem_malloc( size_t size )
{
MEMBLOCK *mb, *mb2;
int compressed=0;
if( !pool_okay ) {
log_info(
_("operation is not possible without initialized secure memory\n"));
log_info(_("(you may have used the wrong program for this task)\n"));
exit(2);
}
if( show_warning && !suspend_warning ) {
show_warning = 0;
print_warn();
}
/* blocks are always a multiple of 32 */
size += sizeof(MEMBLOCK);
size = ((size + 31) / 32) * 32;
retry:
/* Try to get it from the list of unused blocks. */
for(mb = unused_blocks,mb2=NULL; mb; mb2=mb, mb = mb->u.next )
if( mb->size >= size ) {
if( mb2 )
mb2->u.next = mb->u.next;
else
unused_blocks = mb->u.next;
goto leave;
}
/* allocate a new block */
if( (poollen + size <= poolsize) ) {
mb = (void*)((char*)pool + poollen);
poollen += size;
mb->size = size;
}
else if( !compressed ) {
compressed=1;
compress_pool();
goto retry;
}
else
return NULL;
leave:
cur_alloced += mb->size;
cur_blocks++;
if( cur_alloced > max_alloced )
max_alloced = cur_alloced;
if( cur_blocks > max_blocks )
max_blocks = cur_blocks;
return &mb->u.aligned.c;
}
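/* Change the size of the secure block at P to NEWSIZE.  The block is
* never shrunk; growing it allocates a new block, copies the old
* contents and then wipes and releases the old block.  */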
void *
secmem_realloc( void *p, size_t newsize )
{
MEMBLOCK *mb;
size_t size;
void *a;
mb = (MEMBLOCK*)((char*)p - ((size_t) &((MEMBLOCK*)0)->u.aligned.c));
size = mb->size;
if( newsize < size )
return p; /* it is easier not to shrink the memory */
a = secmem_malloc( newsize );
if ( a ) {
memcpy(a, p, size);
memset((char*)a+size, 0, newsize-size);
secmem_free(p);
}
return a;
}
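/* Wipe and release the secure memory block at A; a NULL pointer is
* ignored.  The block is put back onto the list of unused blocks.  */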
void
secmem_free( void *a )
{
MEMBLOCK *mb;
size_t size;
if( !a )
return;
mb = (MEMBLOCK*)((char*)a - ((size_t) &((MEMBLOCK*)0)->u.aligned.c));
size = mb->size;
/* This does not make much sense: probably this memory is held in the
* cache. We do it anyway: */
wipememory2(mb, 0xff, size );
wipememory2(mb, 0xaa, size );
wipememory2(mb, 0x55, size );
wipememory2(mb, 0x00, size );
mb->size = size;
mb->u.next = unused_blocks;
unused_blocks = mb;
cur_blocks--;
cur_alloced -= size;
}
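/* Return true if P points into the secure memory pool.  */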
int
m_is_secure( const void *p )
{
return p >= pool && p < (void*)((char*)pool+poolsize);
}
/****************
* Warning: This code might be called by an interrupt handler
* and frankly, there should really be such a handler,
* to make sure that the memory is wiped out.
* We hope that the OS wipes out mlocked memory after
* receiving a SIGKILL - it really should do so, otherwise
* there is no chance to get the secure memory cleaned.
*/
void
secmem_term()
{
if( !pool_okay )
return;
wipememory2( pool, 0xff, poolsize);
wipememory2( pool, 0xaa, poolsize);
wipememory2( pool, 0x55, poolsize);
wipememory2( pool, 0x00, poolsize);
#ifdef HAVE_MMAP
if( pool_is_mmapped )
munmap( pool, poolsize );
#endif
pool = NULL;
pool_okay = 0;
poolsize=0;
poollen=0;
unused_blocks=NULL;
}
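/* Print allocation statistics of the secure memory pool to stderr.  */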
void
secmem_dump_stats()
{
if( disable_secmem )
return;
fprintf(stderr,
"secmem usage: %u/%u bytes in %u/%u blocks of pool %lu/%lu\n",
cur_alloced, max_alloced, cur_blocks, max_blocks,
(ulong)poollen, (ulong)poolsize );
}