sec_mem.c
/****************************************************************************
*                                                                           *
*                          Secure Memory Management                         *
*                       Copyright Peter Gutmann 1995-2007                   *
*                                                                           *
****************************************************************************/

#if defined( INC_ALL )
  #include "crypt.h"
  #include "acl.h"
  #include "kernel.h"
#else
  #include "crypt.h"
  #include "kernel/acl.h"
  #include "kernel/kernel.h"
#endif /* Compiler-specific includes */

/* A pointer to the kernel data block */

static KERNEL_DATA *krnlData = NULL;

/* The minimum and maximum amount of secure memory that we can ever
   allocate. A more normal upper bound would be 8K, however the SSL
   session cache constitutes a single large chunk of secure memory that
   goes way over this limit */

#define MIN_ALLOC_SIZE      8
#define MAX_ALLOC_SIZE      8192

/* To support page locking we need to store some additional information
   with the memory block. We do this by reserving an extra memory block at
   the start of the allocated block and saving the information there.

   The information stored in the extra block is a flag indicating whether
   the block is pagelocked (so that we can call the unlock function when
   we free it), the size of the block, and pointers to the next and
   previous blocks in the list of allocated blocks (the list is used by
   the thread that walks the block list touching each one) */

#define CANARY_SIZE     4       /* Size of canary used to spot overwrites */

typedef struct {
    BOOLEAN isLocked;           /* Whether this block is locked */
    int size;                   /* Size of the block (including the size
                                   of the MEMLOCK_INFO) */
    void *next, *prev;          /* Next, previous memory block */
#if defined( __BEOS__ )
    area_id areaID;             /* Needed for page locking under BeOS */
#endif /* __BEOS__ */
    BYTE canary[ CANARY_SIZE ]; /* Canary for spotting overwrites */
    } MEMLOCK_INFO;

#if INT_MAX <= 32767
  #define MEMLOCK_HEADERSIZE    roundUp( sizeof( MEMLOCK_INFO ), 4 )
#elif INT_MAX <= 0xFFFFFFFFUL
  #define MEMLOCK_HEADERSIZE    roundUp( sizeof( MEMLOCK_INFO ), 8 )
#else
  #define MEMLOCK_HEADERSIZE    roundUp( sizeof( MEMLOCK_INFO ), 16 )
#endif /* 16/32/64-bit systems */
59 
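/* The roundUp() macro used above is defined in a cryptlib header rather
   than in this file. The idiom that the MEMLOCK_HEADERSIZE definitions
   assume is the standard round-up-to-a-power-of-two-boundary one; a
   minimal sketch of it (illustrative only, not necessarily cryptlib's
   actual definition) is the following */

#if 0   /* Illustrative sketch only */
#define roundUp( size, roundSize ) \
        ( ( ( size ) + ( ( roundSize ) - 1 ) ) & ~( ( roundSize ) - 1 ) )
        /* e.g. roundUp( 26, 8 ) == 32, so a 26-byte MEMLOCK_INFO would be
           padded out to a 32-byte header on a 32-bit system */
#endif /* 0 */
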
/* We also insert a canary at the start and end of each block to detect
   memory overwrites; the block size is adjusted accordingly to handle
   this extra data */

#define CANARY_STARTVALUE   "\xC0\xED\xBA\xBE" /* More fun than dead beef */
#define CANARY_ENDVALUE     "\x36\xDD\x24\x36"

#define adjustMemCanary( size ) \
        size += CANARY_SIZE     /* Other canary is in MEMLOCK_INFO header */
#define insertMemCanary( memBlockPtr, memPtr ) \
        memcpy( memBlockPtr->canary, CANARY_STARTVALUE, CANARY_SIZE ); \
        memcpy( memPtr + memBlockPtr->size - CANARY_SIZE, CANARY_ENDVALUE, \
                CANARY_SIZE )
#define checkMemCanary( memBlockPtr, memPtr ) \
        REQUIRES( !memcmp( memBlockPtr->canary, CANARY_STARTVALUE, \
                           CANARY_SIZE ) ); \
        REQUIRES( !memcmp( memPtr + memBlockPtr->size - CANARY_SIZE, \
                           CANARY_ENDVALUE, CANARY_SIZE ) );
77 
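/* The layout of an allocated block that results from the above (a sketch,
   not to scale; the start canary lives in the canary[] field of the
   MEMLOCK_INFO header, the end canary is appended after the caller's
   data, and memBlockPtr->size covers all three regions):

     memPtr                                    memPtr + memBlockPtr->size
     v                                                                  v
     +---------------------+------------------------------+------------+
     |     MEMLOCK_INFO    |        caller's data         | end canary |
     | (incl.start canary) |                              |            |
     +---------------------+------------------------------+------------+
                           ^
                           memPtr + MEMLOCK_HEADERSIZE, the pointer
                           that's returned to the caller */
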
/****************************************************************************
*                                                                           *
*                               Misc Functions                              *
*                                                                           *
****************************************************************************/

/* Prepare to allocate/free a block of secure memory */

static int checkInitAlloc( OUT void **allocPtrPtr,
                           const int size )
    {
    static_assert( sizeof( MEMLOCK_INFO ) <= MEMLOCK_HEADERSIZE, \
                   "Memlock header size" );

    /* Make sure that the parameters are in order */
    if( !isWritePtrConst( allocPtrPtr, sizeof( void * ) ) )
        retIntError();

    REQUIRES( size >= MIN_ALLOC_SIZE && size <= MAX_ALLOC_SIZE );

    return( CRYPT_OK );
    }

static int checkInitFree( const void **freePtrPtr,
                          OUT_OPT_PTR BYTE **memPtrPtr,
                          OUT_OPT_PTR MEMLOCK_INFO **memBlockPtrPtr )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;

    assert( isWritePtr( memPtrPtr, sizeof( BYTE * ) ) );
    assert( isWritePtr( memBlockPtrPtr, sizeof( MEMLOCK_INFO * ) ) );

    /* Make sure that the parameters are in order */
    if( !isReadPtrConst( freePtrPtr, sizeof( void * ) ) || \
        !isReadPtrConst( *freePtrPtr, MIN_ALLOC_SIZE ) )
        retIntError();

    /* Clear return values */
    *memPtrPtr = NULL;
    *memBlockPtrPtr = NULL;

    /* Recover the actual allocated memory block data from the pointer */
    memPtr = ( ( BYTE * ) *freePtrPtr ) - MEMLOCK_HEADERSIZE;
    if( !isReadPtrConst( memPtr, sizeof( MEMLOCK_INFO ) ) )
        retIntError();
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    REQUIRES( memBlockPtr->size >= sizeof( MEMLOCK_INFO ) + MIN_ALLOC_SIZE && \
              memBlockPtr->size <= sizeof( MEMLOCK_INFO ) + MAX_ALLOC_SIZE && \
              ( memBlockPtr->isLocked == FALSE || \
                memBlockPtr->isLocked == TRUE ) );

    *memPtrPtr = memPtr;
    *memBlockPtrPtr = memBlockPtr;
    return( CRYPT_OK );
    }
137 
/* Insert a memory block into, and unlink a memory block from, a list of
   memory blocks. We can't use insertDoubleListElements()/
   deleteDoubleListElement() for this because they don't handle the
   end-of-list pointer, since they're intended for random-access lists
   rather than append-only lists */

STDC_NONNULL_ARG( ( 1, 2, 3 ) ) \
static void insertMemBlock( INOUT MEMLOCK_INFO **allocatedListHeadPtr,
                            INOUT MEMLOCK_INFO **allocatedListTailPtr,
                            INOUT MEMLOCK_INFO *memBlockPtr )
    {
    MEMLOCK_INFO *allocatedListHead = *allocatedListHeadPtr;
    MEMLOCK_INFO *allocatedListTail = *allocatedListTailPtr;

    assert( isWritePtr( allocatedListHeadPtr, sizeof( MEMLOCK_INFO * ) ) );
    assert( allocatedListHead == NULL || \
            isWritePtr( allocatedListHead, sizeof( MEMLOCK_INFO ) ) );
    assert( isWritePtr( allocatedListTailPtr, sizeof( MEMLOCK_INFO * ) ) );
    assert( allocatedListTail == NULL || \
            isWritePtr( allocatedListTail, sizeof( MEMLOCK_INFO ) ) );
    assert( isWritePtr( memBlockPtr, sizeof( MEMLOCK_INFO ) ) );

    REQUIRES_V( ( allocatedListHead == NULL && \
                  allocatedListTail == NULL ) || \
                ( allocatedListHead != NULL && \
                  allocatedListTail != NULL ) );

    /* If it's a new list, set up the head and tail pointers and return */
    if( allocatedListHead == NULL )
        {
        *allocatedListHeadPtr = *allocatedListTailPtr = memBlockPtr;
        return;
        }

    /* It's an existing list, add the new element to the end */
    allocatedListTail->next = memBlockPtr;
    memBlockPtr->prev = allocatedListTail;
    *allocatedListTailPtr = memBlockPtr;
    }

STDC_NONNULL_ARG( ( 1, 2, 3 ) ) \
static void unlinkMemBlock( INOUT MEMLOCK_INFO **allocatedListHeadPtr,
                            INOUT MEMLOCK_INFO **allocatedListTailPtr,
                            INOUT MEMLOCK_INFO *memBlockPtr )
    {
    MEMLOCK_INFO *allocatedListHead = *allocatedListHeadPtr;
    MEMLOCK_INFO *allocatedListTail = *allocatedListTailPtr;
    MEMLOCK_INFO *nextBlockPtr = memBlockPtr->next;
    MEMLOCK_INFO *prevBlockPtr = memBlockPtr->prev;

    assert( isWritePtr( allocatedListHeadPtr, sizeof( MEMLOCK_INFO * ) ) );
    assert( allocatedListHead == NULL || \
            isWritePtr( allocatedListHead, sizeof( MEMLOCK_INFO ) ) );
    assert( isWritePtr( allocatedListTailPtr, sizeof( MEMLOCK_INFO * ) ) );
    assert( allocatedListTail == NULL || \
            isWritePtr( allocatedListTail, sizeof( MEMLOCK_INFO ) ) );
    assert( isWritePtr( memBlockPtr, sizeof( MEMLOCK_INFO ) ) );

    /* If we're removing the block from the start of the list, make the
       start the next block */
    if( memBlockPtr == allocatedListHead )
        *allocatedListHeadPtr = nextBlockPtr;
    else
        {
        REQUIRES_V( prevBlockPtr != NULL );

        /* Delete from the middle or end of the list */
        prevBlockPtr->next = nextBlockPtr;
        }
    if( nextBlockPtr != NULL )
        nextBlockPtr->prev = prevBlockPtr;

    /* If we've removed the last element, update the end pointer */
    if( memBlockPtr == allocatedListTail )
        *allocatedListTailPtr = prevBlockPtr;

    /* Clear the current block's pointers, just to be clean */
    memBlockPtr->next = memBlockPtr->prev = NULL;
    }
216 
#if 0   /* Currently unused, in practice would be called from a worker
           thread that periodically touches all secure-data pages */

/* Walk the allocated block list touching each page. In most cases we
   don't need to explicitly touch the page since the allocated blocks are
   almost always smaller than the MMU's page size and simply walking the
   list touches them, but in some rare cases we need to explicitly touch
   each page */

static void touchAllocatedPages( void )
    {
    MEMLOCK_INFO *memBlockPtr;

    /* Lock the allocation object to ensure that other threads don't try
       to access the blocks while we're touching them */
    MUTEX_LOCK( allocation );

    /* Walk down the list (which implicitly touches each page). If an
       allocated region is larger than the MMU page size, which we query
       via getSysVar( SYSVAR_PAGESIZE ), explicitly touch each of its
       pages. In any case it doesn't make much difference since nothing
       ever allocates more than two pages */
    for( memBlockPtr = krnlData->allocatedListHead; memBlockPtr != NULL;
         memBlockPtr = memBlockPtr->next )
        {
        const int pageSize = getSysVar( SYSVAR_PAGESIZE );

        /* If the allocated region has pages beyond the first one (which
           we've already touched by accessing the header), explicitly
           touch those pages as well */
        if( memBlockPtr->size > pageSize )
            {
            BYTE *memPtr = ( BYTE * ) memBlockPtr + pageSize;
            int memSize = memBlockPtr->size;

            /* Touch each page. The rather convoluted expression is to try
               and stop it from being optimised away - it always evaluates
               to true since we only get here if allocatedListHead != NULL,
               but hopefully the compiler won't be able to figure that
               out */
            while( memSize > pageSize )
                {
                if( *memPtr || krnlData->allocatedListHead != NULL )
                    memPtr += pageSize;
                memSize -= pageSize;
                }
            }
        }

    /* Unlock the allocation object to allow access by other threads */
    MUTEX_UNLOCK( allocation );
    }
#endif /* 0 */
269 
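/* A sketch of how touchAllocatedPages() might be driven if it were ever
   enabled. The use of raw Posix threads and the 30-second interval below
   are illustrative assumptions rather than cryptlib's actual mechanism,
   which would use the kernel's own thread wrappers */

#if 0   /* Illustrative sketch only */
#include <pthread.h>
#include <unistd.h>

static void *pageToucherThread( void *arg )
    {
    ( void ) arg;               /* No thread argument is needed */

    /* Periodically touch every secure-data page so that the OS sees the
       pages as recently used and is less inclined to swap them out */
    for( ; ; )
        {
        touchAllocatedPages();
        sleep( 30 );            /* Interval is an arbitrary choice */
        }

    return( NULL );             /* Never reached */
    }
#endif /* 0 */
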
/****************************************************************************
*                                                                           *
*                           Init/Shutdown Functions                         *
*                                                                           *
****************************************************************************/

/* Create and destroy the secure allocation information */

int initAllocation( INOUT KERNEL_DATA *krnlDataPtr )
    {
    int status;

    assert( isWritePtr( krnlDataPtr, sizeof( KERNEL_DATA ) ) );

    /* Set up the reference to the kernel data block */
    krnlData = krnlDataPtr;

    /* Clear the list head and tail pointers */
    krnlData->allocatedListHead = krnlData->allocatedListTail = NULL;

    /* Initialize any data structures required to make the allocation
       thread-safe */
    MUTEX_CREATE( allocation, status );
    ENSURES( cryptStatusOK( status ) );

    return( CRYPT_OK );
    }

void endAllocation( void )
    {
    /* Destroy any data structures required to make the allocation
       thread-safe */
    MUTEX_DESTROY( allocation );

    krnlData = NULL;
    }

/****************************************************************************
*                                                                           *
*                 Windows Secure Memory Allocation Functions                *
*                                                                           *
****************************************************************************/

#if defined( __WIN32__ )

#if !defined( NDEBUG ) && !defined( NT_DRIVER ) && !defined( __BORLANDC__ )
  #define USE_HEAP_CHECKING
#endif /* Win32 debug version */

#ifdef USE_HEAP_CHECKING
  #include <crtdbg.h>   /* For heap checking in debug version */
#endif /* USE_HEAP_CHECKING */

/* Get the start address of a page and, given an address in a page and a
   size, determine on which page the data ends. These are used to
   determine which pages a memory block covers */

#if defined( _MSC_VER ) && ( _MSC_VER >= 1400 )
  #define PTR_TYPE  INT_PTR
#else
  #define PTR_TYPE  long
#endif /* Newer versions of VC++ */

#define getPageStartAddress( address ) \
        ( ( PTR_TYPE ) ( address ) & ~( pageSize - 1 ) )
#define getPageEndAddress( address, size ) \
        getPageStartAddress( ( PTR_TYPE ) address + ( size ) - 1 )
338 
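/* As a worked example of the macros above, with the usual 4K page size
   (pageSize == 0x1000) a 64-byte block starting at address 0x12FF0
   straddles a page boundary:

     getPageStartAddress( 0x12FF0 )   == 0x12000
     getPageEndAddress( 0x12FF0, 64 ) == getPageStartAddress( 0x1302F )
                                      == 0x13000

   so both the page at 0x12000 and the page at 0x13000 have to be taken
   into account when the block is locked or unlocked */
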
/* A safe malloc function that performs page locking if possible */

int krnlMemalloc( OUT_BUFFER_ALLOC_OPT( size ) void **pointer,
                  IN_LENGTH int size )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    status = checkInitAlloc( pointer, size );
    if( cryptStatusError( status ) )
        return( status );

    /* Clear return values */
    *pointer = NULL;

    /* Try and allocate the memory */
    adjustMemCanary( size );    /* For canary at end of block */
    if( ( memPtr = clAlloc( "krnlMemAlloc", \
                            size + MEMLOCK_HEADERSIZE ) ) == NULL )
        return( CRYPT_ERROR_MEMORY );
    memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    memBlockPtr->isLocked = FALSE;
    memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
    insertMemCanary( memBlockPtr, memPtr );
    *pointer = memPtr + MEMLOCK_HEADERSIZE;

    /* Try to lock the pages in memory */
#if !defined( NT_DRIVER )
    /* Under Win95 the VirtualLock() function is implemented as
       `return( TRUE )' ("Thank Microsoft kids" - "Thaaaanks Bill"). Under
       NT the function does actually work, but with a number of caveats.
       The main one is that it has been claimed that VirtualLock() only
       guarantees that the memory won't be paged while a thread in the
       process is running, and that when all threads are preempted the
       memory is still a target for paging. This would mean that on a
       loaded system a process that was idle for some time could have its
       memory unlocked by the system and swapped out to disk (actually
       with NT's somewhat strange paging strategy and gradual creeping
       takeover of free memory for disk buffers, it can get paged even on
       a completely unloaded system). However, attempts to force data to
       be paged under Win2K and XP under various conditions have been
       unsuccessful so it may be that the behaviour changed in post-NT
       versions of the OS. In any case VirtualLock() under these newer
       OSes seems to be fairly effective in keeping data off disk.

       An additional concern is that although VirtualLock() takes
       arbitrary memory pointers and a size parameter, the locking is
       actually done on a per-page basis, so that unlocking a region that
       shares a page with another locked region means that both regions
       are unlocked. Since VirtualLock() doesn't do reference counting
       (emulating the underlying MMU page locking even though it seems to
       implement an intermediate layer above the MMU so it could in theory
       do this), the only way around this is to walk the chain of
       allocated blocks and not unlock a block if there's another block
       allocated on the same page. Ick.

       For the NT kernel driver the memory is always allocated from the
       non-paged pool so there's no need for these gyrations */
    if( VirtualLock( memPtr, memBlockPtr->size ) )
        memBlockPtr->isLocked = TRUE;
#endif /* !NT_DRIVER */

    /* Lock the memory list, insert the new block, and unlock it again */
    MUTEX_LOCK( allocation );
    insertMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
#ifdef USE_HEAP_CHECKING
    /* Sanity check to detect memory chain corruption */
    assert( _CrtIsValidHeapPointer( memBlockPtr ) );
    assert( memBlockPtr->next == NULL );
    assert( krnlData->allocatedListHead == krnlData->allocatedListTail || \
            _CrtIsValidHeapPointer( memBlockPtr->prev ) );
#endif /* USE_HEAP_CHECKING */
    MUTEX_UNLOCK( allocation );

    return( CRYPT_OK );
    }
419 
/* A safe free function that scrubs memory and zeroes the pointer.

    "You will softly and suddenly vanish away
     And never be met with again" - Lewis Carroll,
                                    "The Hunting of the Snark" */

int krnlMemfree( INOUT_PTR void **pointer )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    /* Check the function preconditions */
    status = checkInitFree( pointer, &memPtr, &memBlockPtr );
    if( cryptStatusError( status ) )
        return( status );

    /* Lock the memory list, unlink the block being freed, and unlock the
       list again */
    MUTEX_LOCK( allocation );
    checkMemCanary( memBlockPtr, memPtr );
#ifdef USE_HEAP_CHECKING
    /* Sanity check to detect memory chain corruption */
    assert( _CrtIsValidHeapPointer( memBlockPtr ) );
    assert( memBlockPtr->next == NULL || \
            _CrtIsValidHeapPointer( memBlockPtr->next ) );
    assert( memBlockPtr->prev == NULL || \
            _CrtIsValidHeapPointer( memBlockPtr->prev ) );
#endif /* USE_HEAP_CHECKING */
    unlinkMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
#if !defined( NT_DRIVER )
    /* Because VirtualLock() works on a per-page basis, we can't unlock a
       memory block if there's another locked block on the same page. The
       only way to manage this is to walk the block list checking whether
       there's another block allocated on the same page. Although in
       theory this could make freeing memory rather slow, in practice
       there are only a small number of allocated blocks to check so it's
       relatively quick, especially compared to the overhead imposed by
       the lethargic VC++ allocator. The only real disadvantage is that
       the allocation object remains locked while we do the free, but this
       isn't any worse than the overhead of touchAllocatedPages(). Note
       that the following code assumes that an allocated block will never
       cover more than two pages, which is always the case */
    if( memBlockPtr->isLocked )
        {
        MEMLOCK_INFO *currentBlockPtr;
        PTR_TYPE block1PageAddress, block2PageAddress;
        const int pageSize = getSysVar( SYSVAR_PAGESIZE );

        /* Calculate the addresses of the page(s) in which the memory
           block resides */
        block1PageAddress = getPageStartAddress( memBlockPtr );
        block2PageAddress = getPageEndAddress( memBlockPtr,
                                               memBlockPtr->size );
        if( block1PageAddress == block2PageAddress )
            block2PageAddress = 0;

        /* Walk down the block list checking whether the page(s) contain
           another locked block */
        for( currentBlockPtr = krnlData->allocatedListHead; \
             currentBlockPtr != NULL; \
             currentBlockPtr = currentBlockPtr->next )
            {
            const PTR_TYPE currentPage1Address = \
                        getPageStartAddress( currentBlockPtr );
            PTR_TYPE currentPage2Address = \
                        getPageEndAddress( currentBlockPtr, \
                                           currentBlockPtr->size );

            if( currentPage1Address == currentPage2Address )
                currentPage2Address = 0;

            /* If there's another block allocated on either of the pages,
               don't unlock that page */
            if( block1PageAddress == currentPage1Address || \
                block1PageAddress == currentPage2Address )
                {
                block1PageAddress = 0;
                if( !block2PageAddress )
                    break;
                }
            if( block2PageAddress == currentPage1Address || \
                block2PageAddress == currentPage2Address )
                {
                block2PageAddress = 0;
                if( !block1PageAddress )
                    break;
                }
            }

        /* Finally, if either page needs unlocking, do so. The supplied
           size is irrelevant since the entire page that the memory is on
           is unlocked */
        if( block1PageAddress )
            VirtualUnlock( ( void * ) block1PageAddress, 16 );
        if( block2PageAddress )
            VirtualUnlock( ( void * ) block2PageAddress, 16 );
        }
#endif /* !NT_DRIVER */
    MUTEX_UNLOCK( allocation );

    /* Zeroise the memory (including the memlock info), free it, and zero
       the pointer */
    zeroise( memPtr, memBlockPtr->size );
    clFree( "krnlMemFree", memPtr );
    *pointer = NULL;

    return( CRYPT_OK );
    }
527 
/****************************************************************************
*                                                                           *
*                Unix/BeOS Secure Memory Allocation Functions               *
*                                                                           *
****************************************************************************/

#elif defined( __UNIX__ ) || defined( __BEOS__ )

/* Since the function prototypes for the SYSV/Posix mlock() call are
   declared in different headers depending on the Unix version, we usually
   have to prototype it ourselves here rather than trying to guess its
   location */

#if defined( __osf__ ) || defined( __alpha__ )
  #include <sys/mman.h>
#elif defined( sun )
  #include <sys/mman.h>
  #include <sys/types.h>
#else
  int mlock( void *address, size_t length );
  int munlock( void *address, size_t length );
#endif /* Unix-variant-specific includes */

/* Under many Unix variants the SYSV/Posix mlock() call can be used, but
   only by the superuser (with occasional OS-specific variants, for
   example under some newer Linux variants the caller needs the specific
   CAP_IPC_LOCK privilege rather than just generally being root). OSF/1
   has mlock(), but this is defined to the nonexistent memlk() so we need
   to special-case it out. QNX (depending on the version) either doesn't
   have mlock() at all or it's a dummy that just returns -1, so we no-op
   it out. Aches, A/UX, PHUX, Linux < 1.3.something, and Ultrix don't even
   pretend to have mlock(). Many systems also have plock(), but this is
   pretty crude since it locks all data, and also has various other
   shortcomings. Finally, PHUX has datalock(), which is just a plock()
   variant */

#if defined( _AIX ) || defined( __alpha__ ) || defined( __aux ) || \
    defined( _CRAY ) || defined( __CYGWIN__ ) || defined( __hpux ) || \
    ( defined( __linux__ ) && OSVERSION < 2 ) || \
    defined( _M_XENIX ) || defined( __osf__ ) || \
    ( defined( __QNX__ ) && OSVERSION <= 6 ) || \
    defined( __TANDEM_NSK__ ) || defined( __TANDEM_OSS__ ) || \
    defined( __ultrix )
  #define mlock( a, b )     1
  #define munlock( a, b )
#endif /* Unix OS-specific defines */
572 
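/* Note the return-value convention that the stub above preserves: Posix
   mlock() returns 0 on success and nonzero on failure, so the
   if( !mlock( ... ) ) test in krnlMemalloc() below only sets isLocked on
   success. The stub's constant 1 therefore means "always failed", which
   guarantees that isLocked stays FALSE and the corresponding munlock()
   no-op is never invoked at free time */
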
/* A safe malloc function that performs page locking if possible */

int krnlMemalloc( OUT_BUFFER_ALLOC_OPT( size ) void **pointer,
                  IN_LENGTH int size )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
#if defined( __BEOS__ )
    area_id areaID;
#endif /* __BEOS__ */
    int status;

    status = checkInitAlloc( pointer, size );
    if( cryptStatusError( status ) )
        return( status );

    /* Clear return values */
    *pointer = NULL;

    /* Try and allocate the memory */
    adjustMemCanary( size );    /* For canary at end of block */
#if defined( __BEOS__ )
    /* Under BeOS we have to allocate a locked area; we can't lock it
       after the event. create_area(), like most of the low-level memory
       access functions provided by different OSes, functions at the page
       level, so we round the size up to the page size. We can mitigate
       the granularity somewhat by specifying lazy locking, which means
       that the page isn't locked until it's committed.

       In pre-open-source BeOS, areas were a bit of a security tradeoff
       because they were globally visible(!!!) through the use of
       find_area(), so that any other process in the system could find
       them. An attacker could always find the app's malloc() arena anyway
       because of this, but putting data directly into areas made the
       attacker's task somewhat easier. Open-source BeOS fixed this,
       mostly because it would have taken extra work to make areas
       explicitly globally visible and no-one could see a reason for this,
       so it's somewhat safer there.

       However, the implementation of create_area() in the open-source
       BeOS seems to be rather flaky (simply creating an area and then
       immediately destroying it again causes a segmentation violation) so
       it may be necessary to turn it off for some BeOS releases */
    areaID = create_area( "memory_block", ( void ** ) &memPtr,
                          B_ANY_ADDRESS,
                          roundUp( size + MEMLOCK_HEADERSIZE, B_PAGE_SIZE ),
                          B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA );
    if( areaID < B_NO_ERROR )
#else
    if( ( memPtr = clAlloc( "krnlMemAlloc", \
                            size + MEMLOCK_HEADERSIZE ) ) == NULL )
#endif /* __BEOS__ */
        return( CRYPT_ERROR_MEMORY );
    memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    memBlockPtr->isLocked = FALSE;
    memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
#if defined( __BEOS__ )
    memBlockPtr->areaID = areaID;
#endif /* __BEOS__ */
    insertMemCanary( memBlockPtr, memPtr );
    *pointer = memPtr + MEMLOCK_HEADERSIZE;

    /* Try to lock the pages in memory */
#if !defined( __BEOS__ )
    if( !mlock( memPtr, memBlockPtr->size ) )
        memBlockPtr->isLocked = TRUE;
#endif /* !__BEOS__ */

    /* Lock the memory list, insert the new block, and unlock it again */
    MUTEX_LOCK( allocation );
    insertMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    return( CRYPT_OK );
    }
650 
/* A safe free function that scrubs memory and zeroes the pointer.

    "You will softly and suddenly vanish away
     And never be met with again" - Lewis Carroll,
                                    "The Hunting of the Snark" */

int krnlMemfree( INOUT_PTR void **pointer )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
#if defined( __BEOS__ )
    area_id areaID;
#endif /* __BEOS__ */
    int status;

    /* Check the function preconditions */
    status = checkInitFree( ( const void ** ) pointer, &memPtr,
                            &memBlockPtr );
    if( cryptStatusError( status ) )
        return( status );

    /* Lock the memory list, unlink the block being freed, and unlock the
       list again */
    MUTEX_LOCK( allocation );
    checkMemCanary( memBlockPtr, memPtr );
    unlinkMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    /* If the memory was locked, unlock it now */
#if defined( __BEOS__ )
    areaID = memBlockPtr->areaID;
    zeroise( memPtr, memBlockPtr->size );
    delete_area( areaID );
#else
    if( memBlockPtr->isLocked )
        munlock( memPtr, memBlockPtr->size );
#endif /* OS-specific memory unlocking */

    /* Zeroise the memory (including the memlock info), free it, and zero
       the pointer */
#if !defined( __BEOS__ )
    zeroise( memPtr, memBlockPtr->size );
    clFree( "krnlMemFree", memPtr );
#endif /* !__BEOS__ */
    *pointer = NULL;

    return( CRYPT_OK );
    }
701 
/****************************************************************************
*                                                                           *
*                 ChorusOS Secure Memory Allocation Functions               *
*                                                                           *
****************************************************************************/

#elif defined( __CHORUS__ )

/* ChorusOS is one of the very few embedded OSes with paging capabilities;
   fortunately there's a way to allocate nonpageable memory if paging is
   enabled */

#include <mem/chMem.h>

/* A safe malloc function that performs page locking if possible */

int krnlMemalloc( OUT_BUFFER_ALLOC_OPT( size ) void **pointer,
                  IN_LENGTH int size )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    KnRgnDesc rgnDesc = { K_ANYWHERE, size + MEMLOCK_HEADERSIZE, \
                          K_WRITEABLE | K_NODEMAND };
    int status;

    status = checkInitAlloc( pointer, size );
    if( cryptStatusError( status ) )
        return( status );

    /* Clear return values */
    *pointer = NULL;

    /* Try and allocate the memory */
    adjustMemCanary( size );    /* For canary at end of block */
    if( rgnAllocate( K_MYACTOR, &rgnDesc ) != K_OK )
        return( CRYPT_ERROR_MEMORY );
    memPtr = rgnDesc.startAddr;
    memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    memBlockPtr->isLocked = FALSE;
    memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
    insertMemCanary( memBlockPtr, memPtr );
    *pointer = memPtr + MEMLOCK_HEADERSIZE;

    /* Lock the memory list, insert the new block, and unlock it again */
    MUTEX_LOCK( allocation );
    insertMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    return( CRYPT_OK );
    }

/* A safe free function that scrubs memory and zeroes the pointer.

    "You will softly and suddenly vanish away
     And never be met with again" - Lewis Carroll,
                                    "The Hunting of the Snark" */

int krnlMemfree( INOUT_PTR void **pointer )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    KnRgnDesc rgnDesc = { K_ANYWHERE, 0, 0 };
    int status;

    /* Check the function preconditions */
    status = checkInitFree( pointer, &memPtr, &memBlockPtr );
    if( cryptStatusError( status ) )
        return( status );

    /* Lock the memory list, unlink the block being freed, and unlock the
       list again */
    MUTEX_LOCK( allocation );
    checkMemCanary( memBlockPtr, memPtr );
    unlinkMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    /* Zeroise the memory (including the memlock info), free it, and zero
       the pointer */
    rgnDesc.size = memBlockPtr->size;
    rgnDesc.startAddr = memPtr;
    zeroise( memPtr, memBlockPtr->size );
    rgnFree( K_MYACTOR, &rgnDesc );
    *pointer = NULL;

    return( CRYPT_OK );
    }
794 
/****************************************************************************
*                                                                           *
*                Macintosh Secure Memory Allocation Functions               *
*                                                                           *
****************************************************************************/

#elif defined( __MAC__ )

#include <Memory.h>

/* A safe malloc function that performs page locking if possible */

int krnlMemalloc( OUT_BUFFER_ALLOC_OPT( size ) void **pointer,
                  IN_LENGTH int size )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    status = checkInitAlloc( pointer, size );
    if( cryptStatusError( status ) )
        return( status );

    /* Clear return values */
    *pointer = NULL;

    /* Try and allocate the memory */
    adjustMemCanary( size );    /* For canary at end of block */
    if( ( memPtr = clAlloc( "krnlMemAlloc", \
                            size + MEMLOCK_HEADERSIZE ) ) == NULL )
        return( CRYPT_ERROR_MEMORY );
    memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    memBlockPtr->isLocked = FALSE;
    memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
    insertMemCanary( memBlockPtr, memPtr );
    *pointer = memPtr + MEMLOCK_HEADERSIZE;

    /* Try to lock the pages in memory */
#if !defined( CALL_NOT_IN_CARBON ) || CALL_NOT_IN_CARBON
    /* The Mac has two functions for locking memory, HoldMemory() (which
       makes the memory ineligible for paging) and LockMemory() (which
       makes it ineligible for paging and also immovable). We use
       HoldMemory() since it's slightly more friendly, but really critical
       applications could use LockMemory() */
    if( HoldMemory( memPtr, memBlockPtr->size ) == noErr )
        memBlockPtr->isLocked = TRUE;
#endif /* Non Mac OS X memory locking */

    /* Lock the memory list, insert the new block, and unlock it again */
    MUTEX_LOCK( allocation );
    insertMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    return( CRYPT_OK );
    }

/* A safe free function that scrubs memory and zeroes the pointer.

    "You will softly and suddenly vanish away
     And never be met with again" - Lewis Carroll,
                                    "The Hunting of the Snark" */

int krnlMemfree( INOUT_PTR void **pointer )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    /* Check the function preconditions */
    status = checkInitFree( pointer, &memPtr, &memBlockPtr );
    if( cryptStatusError( status ) )
        return( status );

    /* Lock the memory list, unlink the block being freed, and unlock the
       list again */
    MUTEX_LOCK( allocation );
    checkMemCanary( memBlockPtr, memPtr );
    unlinkMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    /* If the memory was locked, unlock it now */
#if !defined( CALL_NOT_IN_CARBON ) || CALL_NOT_IN_CARBON
    if( memBlockPtr->isLocked )
        UnholdMemory( memPtr, memBlockPtr->size );
#endif /* Non Mac OS X memory locking */

    /* Zeroise the memory (including the memlock info), free it, and zero
       the pointer */
    zeroise( memPtr, memBlockPtr->size );
    clFree( "krnlMemFree", memPtr );
    *pointer = NULL;

    return( CRYPT_OK );
    }
895 
/****************************************************************************
*                                                                           *
*                  Misc. Secure Memory Allocation Functions                 *
*                                                                           *
****************************************************************************/

#else

#if defined( __MSDOS__ ) && defined( __DJGPP__ )
  #include <dpmi.h>
  #include <go32.h>
#endif /* DOS-32 */

/* A safe malloc function that performs page locking if possible */

int krnlMemalloc( OUT_BUFFER_ALLOC_OPT( size ) void **pointer,
                  IN_LENGTH int size )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    status = checkInitAlloc( pointer, size );
    if( cryptStatusError( status ) )
        return( status );

    /* Clear return values */
    *pointer = NULL;

    /* Try and allocate the memory */
    adjustMemCanary( size );    /* For canary at end of block */
    if( ( memPtr = clAlloc( "krnlMemAlloc", \
                            size + MEMLOCK_HEADERSIZE ) ) == NULL )
        return( CRYPT_ERROR_MEMORY );
    memset( memPtr, 0, size + MEMLOCK_HEADERSIZE );
    memBlockPtr = ( MEMLOCK_INFO * ) memPtr;
    memBlockPtr->isLocked = FALSE;
    memBlockPtr->size = size + MEMLOCK_HEADERSIZE;
    insertMemCanary( memBlockPtr, memPtr );
    *pointer = memPtr + MEMLOCK_HEADERSIZE;

    /* If the OS supports paging, try to lock the pages in memory */
#if defined( __MSDOS__ ) && defined( __DJGPP__ )
    /* Under 32-bit MSDOS use the DPMI functions to lock the memory */
    if( _go32_dpmi_lock_data( memPtr, memBlockPtr->size ) == 0 )
        memBlockPtr->isLocked = TRUE;
#endif /* Systems that support memory locking */

    /* Lock the memory list, insert the new block, and unlock it again */
    MUTEX_LOCK( allocation );
    insertMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    return( CRYPT_OK );
    }

/* A safe free function that scrubs memory and zeroes the pointer.

    "You will softly and suddenly vanish away
     And never be met with again" - Lewis Carroll,
                                    "The Hunting of the Snark" */

int krnlMemfree( INOUT_PTR void **pointer )
    {
    MEMLOCK_INFO *memBlockPtr;
    BYTE *memPtr;
    int status;

    /* Check the function preconditions */
    status = checkInitFree( pointer, &memPtr, &memBlockPtr );
    if( cryptStatusError( status ) )
        return( status );

    /* Lock the memory list, unlink the block being freed, and unlock the
       list again */
    MUTEX_LOCK( allocation );
    checkMemCanary( memBlockPtr, memPtr );
    unlinkMemBlock( ( MEMLOCK_INFO ** ) &krnlData->allocatedListHead,
                    ( MEMLOCK_INFO ** ) &krnlData->allocatedListTail,
                    memBlockPtr );
    MUTEX_UNLOCK( allocation );

    /* If the memory is locked, unlock it now */
#if defined( __MSDOS__ ) && defined( __DJGPP__ )
    /* Under 32-bit MSDOS we *could* use the DPMI functions to unlock the
       memory, but since many DPMI hosts implement page locking in a
       binary form (no lock count is maintained) it's better not to unlock
       anything at all. Note that this may lead to a shortage of virtual
       memory in long-running applications */
#endif /* Systems that support memory locking */

    /* Zeroise the memory (including the memlock info), free it, and zero
       the pointer */
    zeroise( memPtr, memBlockPtr->size );
    clFree( "krnlMemFree", memPtr );
    *pointer = NULL;

    return( CRYPT_OK );
    }
#endif /* OS-specific secure memory handling */