Main Page | Class Hierarchy | Data Structures | Directories | File List | Data Fields | Related Pages

mutex_int.h

00001 /*-
00002  * See the file LICENSE for redistribution information.
00003  *
00004  * Copyright (c) 1996-2005
00005  *      Sleepycat Software.  All rights reserved.
00006  *
00007  * $Id: mutex_int.h,v 12.17 2005/11/08 22:26:49 mjc Exp $
00008  */
00009 
00010 #ifndef _DB_MUTEX_INT_H_
00011 #define _DB_MUTEX_INT_H_
00012 
00013 /*********************************************************************
00014  * POSIX.1 pthreads interface.
00015  *********************************************************************/
00016 #ifdef HAVE_MUTEX_PTHREADS
00017 #include <pthread.h>
00018 
/*
 * MUTEX_FIELDS names the members embedded in the DB mutex structure for
 * this port: a POSIX mutex plus a condition variable.
 */
00019 #define MUTEX_FIELDS                                                    \
00020         pthread_mutex_t mutex;          /* Mutex. */                    \
00021         pthread_cond_t  cond;           /* Condition variable. */
00022 #endif
00023 
/* <thread.h> is included again in the UI-threads section further down. */
00024 #ifdef HAVE_MUTEX_UI_THREADS
00025 #include <thread.h>
00026 #endif
00027 
00028 /*********************************************************************
00029  * Solaris lwp threads interface.
00030  *
00031  * !!!
00032  * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
00033  * which are available), for two reasons.  First, the Solaris C library
00034  * includes versions of the both UI and POSIX thread mutex interfaces, but
00035  * they are broken in that they don't support inter-process locking, and
00036  * there's no way to detect it, e.g., calls to configure the mutexes for
00037  * inter-process locking succeed without error.  So, we use LWP mutexes so
00038  * that we don't fail in fairly undetectable ways because the application
00039  * wasn't linked with the appropriate threads library.  Second, there were
00040  * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
00041  * before loading the libthread/libpthread threads libraries (e.g., by using
00042  * dlopen to load the DB library), the pwrite64 interface would be translated
00043  * into a call to pwrite and DB would drop core.
00044  *********************************************************************/
00045 #ifdef HAVE_MUTEX_SOLARIS_LWP
00046 /*
00047  * XXX
00048  * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
00049  * Solaris manual page as the correct include to use, it causes the Solaris
00050  * compiler on SunOS 2.6 to fail.
00051  */
00052 #include <synch.h>
00053 
/* LWP port: mutex plus condition variable, mirroring the pthreads layout. */
00054 #define MUTEX_FIELDS                                                    \
00055         lwp_mutex_t mutex;              /* Mutex. */                    \
00056         lwp_cond_t cond;                /* Condition variable. */
00057 #endif
00058 
00059 /*********************************************************************
00060  * Solaris/Unixware threads interface.
00061  *********************************************************************/
00062 #ifdef HAVE_MUTEX_UI_THREADS
00063 #include <thread.h>
00064 #include <synch.h>
00065 
/* UI-threads port: same two-member layout with the UI thread types. */
00066 #define MUTEX_FIELDS                                                    \
00067         mutex_t mutex;                  /* Mutex. */                    \
00068         cond_t  cond;                   /* Condition variable. */
00069 #endif
00070 
00071 /*********************************************************************
00072  * AIX C library functions.
00073  *********************************************************************/
00074 #ifdef HAVE_MUTEX_AIX_CHECK_LOCK
00075 #include <sys/atomic_op.h>
00076 typedef int tsl_t;
00077 
00078 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * AIX test-and-set needs no initialization; MUTEX_INIT evaluates to 0
 * ("no error", consistent with the other ports in this file).
 * MUTEX_SET inverts _check_lock's result so nonzero means acquired.
 */
00079 #define MUTEX_INIT(x)   0
00080 #define MUTEX_SET(x)    (!_check_lock(x, 0, 1))
00081 #define MUTEX_UNSET(x)  _clear_lock(x, 0)
00082 #endif
00083 #endif
00084 
00085 /*********************************************************************
00086  * Apple/Darwin library functions.
00087  *********************************************************************/
00088 #ifdef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY
00089 typedef u_int32_t tsl_t;
00090 
00091 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * _spin_lock_try/_spin_unlock are declared by hand here rather than via a
 * system header; presumably libc internals -- verify against the target OS.
 * MUTEX_INIT unlocks, then yields 0 via the comma operator (success).
 */
00092 extern int _spin_lock_try(tsl_t *);
00093 extern void _spin_unlock(tsl_t *);
00094 #define MUTEX_SET(tsl)          _spin_lock_try(tsl)
00095 #define MUTEX_UNSET(tsl)        _spin_unlock(tsl)
00096 #define MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
00097 #endif
00098 #endif
00099 
00100 /*********************************************************************
00101  * General C library functions (msemaphore).
00102  *
00103  * !!!
00104  * Check for HPPA as a special case, because it requires unusual alignment,
00105  * and doesn't support semaphores in malloc(3) or shmget(2) memory.
00106  *
00107  * !!!
00108  * Do not remove the MSEM_IF_NOWAIT flag.  The problem is that if a single
00109  * process makes two msem_lock() calls in a row, the second one returns an
00110  * error.  We depend on the fact that we can lock against ourselves in the
00111  * locking subsystem, where we set up a mutex so that we can block ourselves.
00112  * Tested on OSF1 v4.0.
00113  *********************************************************************/
/* HPPA requires 16-byte alignment for its msemaphore (see banner above). */
00114 #ifdef HAVE_MUTEX_HPPA_MSEM_INIT
00115 #define MUTEX_ALIGN     16
00116 #endif
00117 
00118 #if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
00119 #include <sys/mman.h>
00120 typedef msemaphore tsl_t;
00121 
00122 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * MUTEX_INIT is nonzero (error) when msem_init fails; MSEM_IF_NOWAIT is
 * required -- see the banner comment above about self-locking.
 */
00123 #define MUTEX_INIT(x)   (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
00124 #define MUTEX_SET(x)    (!msem_lock(x, MSEM_IF_NOWAIT))
00125 #define MUTEX_UNSET(x)  msem_unlock(x, 0)
00126 #endif
00127 #endif
00128 
00129 /*********************************************************************
00130  * Plan 9 library functions.
00131  *********************************************************************/
00132 #ifdef HAVE_MUTEX_PLAN9
00133 typedef Lock tsl_t;
00134 
/* Zero-fill the Lock to initialize; comma operator yields 0 (success). */
00135 #define MUTEX_INIT(x)   (memset(x, 0, sizeof(Lock)), 0)
00136 #define MUTEX_SET(x)    canlock(x)
00137 #define MUTEX_UNSET(x)  unlock(x)
00138 #endif
00139 
00140 /*********************************************************************
00141  * Reliant UNIX C library functions.
00142  *********************************************************************/
00143 #ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
00144 #include <ulocks.h>
00145 typedef spinlock_t tsl_t;
00146 
00147 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* initspin has no useful return; the comma operator yields 0 (success). */
00148 #define MUTEX_INIT(x)   (initspin(x, 1), 0)
00149 #define MUTEX_SET(x)    (cspinlock(x) == 0)
00150 #define MUTEX_UNSET(x)  spinunlock(x)
00151 #endif
00152 #endif
00153 
00154 /*********************************************************************
00155  * General C library functions (POSIX 1003.1 sema_XXX).
00156  *
00157  * !!!
00158  * Never selected by autoconfig in this release (semaphore calls are known
00159  * to not work in Solaris 5.5).
00160  *********************************************************************/
00161 #ifdef HAVE_MUTEX_SEMA_INIT
00162 #include <synch.h>
00163 typedef sema_t tsl_t;
00164 
00165 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * Unlike most ports, semaphores need explicit destruction, so this port
 * overrides the default no-op MUTEX_DESTROY defined at the end of the file.
 */
00166 #define MUTEX_DESTROY(x) sema_destroy(x)
00167 #define MUTEX_INIT(x)    (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
00168 #define MUTEX_SET(x)     (sema_wait(x) == 0)
00169 #define MUTEX_UNSET(x)   sema_post(x)
00170 #endif
00171 #endif
00172 
00173 /*********************************************************************
00174  * SGI C library functions.
00175  *********************************************************************/
00176 #ifdef HAVE_MUTEX_SGI_INIT_LOCK
00177 #include <abi_mutex.h>
00178 typedef abilock_t tsl_t;
00179 
00180 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* MUTEX_INIT is nonzero (error) when init_lock fails. */
00181 #define MUTEX_INIT(x)   (init_lock(x) != 0)
00182 #define MUTEX_SET(x)    (!acquire_lock(x))
00183 #define MUTEX_UNSET(x)  release_lock(x)
00184 #endif
00185 #endif
00186 
00187 /*********************************************************************
00188  * Solaris C library functions.
00189  *
00190  * !!!
00191  * These are undocumented functions, but they're the only ones that work
00192  * correctly as far as we know.
00193  *********************************************************************/
00194 #ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
00195 #include <sys/machlock.h>
00196 typedef lock_t tsl_t;
00197 
00198 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* _lock_try/_lock_clear need no initialization; MUTEX_INIT is 0 (no error). */
00199 #define MUTEX_INIT(x)   0
00200 #define MUTEX_SET(x)    _lock_try(x)
00201 #define MUTEX_UNSET(x)  _lock_clear(x)
00202 #endif
00203 #endif
00204 
00205 /*********************************************************************
00206  * VMS.
00207  *********************************************************************/
00208 #ifdef HAVE_MUTEX_VMS
/*
 * Fixed: the original line read "#include <sys/mman.h>;" -- a #include
 * directive must be followed only by a new-line (C99 6.10.2); the stray
 * semicolon is an extra token that compilers diagnose.
 */
00209 #include <sys/mman.h>
00210 #include <builtins.h>
00211 typedef volatile unsigned char tsl_t;
00212 
00213 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * Both variants atomically set bit 0 and test its prior value; the result
 * is inverted so MUTEX_SET is nonzero when the lock was previously clear
 * (i.e., we acquired it).
 */
00214 #ifdef __ALPHA
00215 #define MUTEX_SET(tsl)          (!__TESTBITSSI(tsl, 0))
00216 #else /* __VAX */
00217 #define MUTEX_SET(tsl)          (!(int)_BBSSI(0, tsl))
00218 #endif
/* Plain store releases the lock; tsl_t is volatile-qualified above. */
00219 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00220 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00221 #endif
00222 #endif
00223 
00224 /*********************************************************************
00225  * VxWorks
00226  * Use basic binary semaphores in VxWorks, as we currently do not need
00227  * any special features.  We do need the ability to single-thread the
00228  * entire system, however, because VxWorks doesn't support the open(2)
00229  * flag O_EXCL, the mechanism we normally use to single thread access
00230  * when we're first looking for a DB environment.
00231  *********************************************************************/
00232 #ifdef HAVE_MUTEX_VXWORKS
00233 #include "taskLib.h"
00234 typedef SEM_ID tsl_t;
00235 
00236 #ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * tsl_t is a semaphore handle, so MUTEX_INIT must create the semaphore
 * (nonzero/true on failure) and this port overrides the default no-op
 * MUTEX_DESTROY to delete it.  Note MUTEX_SET blocks (WAIT_FOREVER)
 * rather than try-locking as most ports do.
 */
00237 #define MUTEX_SET(tsl)          (semTake((*tsl), WAIT_FOREVER) == OK)
00238 #define MUTEX_UNSET(tsl)        (semGive((*tsl)))
00239 #define MUTEX_INIT(tsl)                                                 \
00240         ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
00241 #define MUTEX_DESTROY(tsl)      semDelete(*tsl)
00242 #endif
00243 #endif
00244 
00245 /*********************************************************************
00246  * Win16
00247  *
00248  * Win16 spinlocks are simple because we cannot possibly be preempted.
00249  *
00250  * !!!
00251  * We should simplify this by always returning a no-need-to-lock lock
00252  * when we initialize the mutex.
00253  *********************************************************************/
00254 #ifdef HAVE_MUTEX_WIN16
00255 typedef unsigned int tsl_t;
00256 
00257 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* No preemption on Win16 (see banner), so plain stores suffice. */
00258 #define MUTEX_INIT(x)           0
00259 #define MUTEX_SET(tsl)          (*(tsl) = 1)
00260 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00261 #endif
00262 #endif
00263 
00264 /*********************************************************************
00265  * Win32
00266  *********************************************************************/
00267 #if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
/*
 * NOTE(review): the last field line below ends with a continuation
 * backslash, splicing the following blank line into the macro.  Harmless,
 * but the trailing backslash could be dropped.
 */
00268 #define MUTEX_FIELDS                                                    \
00269         LONG volatile tas;                                              \
00270         LONG nwaiters;                                                  \
00271         u_int32_t id;   /* ID used for creating events */               \
00272 
00273 #if defined(LOAD_ACTUAL_MUTEX_CODE)
/* InterlockedExchange returns the previous value: 0 means we acquired. */
00274 #define MUTEX_SET(tsl)          (!InterlockedExchange((PLONG)tsl, 1))
00275 #define MUTEX_UNSET(tsl)        InterlockedExchange((PLONG)tsl, 0)
00276 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00277 
00278 /*
00279  * From Intel's performance tuning documentation (and see SR #6975):
00280  * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
00281  *
00282  * "For this reason, it is highly recommended that you insert the PAUSE
00283  * instruction into all spin-wait code immediately. Using the PAUSE
00284  * instruction does not affect the correctness of programs on existing
00285  * platforms, and it improves performance on Pentium 4 processor platforms."
00286  */
00287 #ifdef HAVE_MUTEX_WIN32
00288 #ifndef _WIN64
/* 0xF3 0x90 is the PAUSE (rep;nop) encoding, emitted as raw bytes for MSVC. */
00289 #define MUTEX_PAUSE             {__asm{_emit 0xf3}; __asm{_emit 0x90}}
00290 #endif
00291 #endif
00292 #ifdef HAVE_MUTEX_WIN32_GCC
00293 #define MUTEX_PAUSE             asm volatile ("rep; nop" : : );
00294 #endif
00295 #endif
00296 #endif
00297 
00298 /*********************************************************************
00299  * 68K/gcc assembly.
00300  *********************************************************************/
00301 #ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
00302 typedef unsigned char tsl_t;
00303 
00304 #ifdef LOAD_ACTUAL_MUTEX_CODE
00305 /* gcc/68K: 0 is clear, 1 is set. */
/*
 * TAS atomically tests-and-sets the byte; SEQ then sets __r to all-ones
 * iff the previous value was zero, so "__r & 1" is 1 on acquisition.
 */
00306 #define MUTEX_SET(tsl) ({                                               \
00307         register tsl_t *__l = (tsl);                                    \
00308         int __r;                                                        \
00309             asm volatile("tas  %1; \n                                   \
00310                           seq  %0"                                      \
00311                 : "=dm" (__r), "=m" (*__l)                              \
00312                 : "1" (*__l)                                            \
00313                 );                                                      \
00314         __r & 1;                                                        \
00315 })
00316 
00317 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00318 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00319 #endif
00320 #endif
00321 
00322 /*********************************************************************
00323  * ALPHA/gcc assembly.
00324  *********************************************************************/
00325 #ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
00326 typedef u_int32_t tsl_t;
00327 
00328 #define MUTEX_ALIGN     4
00329 
00330 #ifdef LOAD_ACTUAL_MUTEX_CODE
00331 /*
00332  * For gcc/alpha.  Should return 0 if could not acquire the lock, 1 if
00333  * lock was acquired properly.
00334  */
/*
 * Classic LL/SC: ldl_l loads-linked, blbs bails if the low bit is already
 * set (lock held), stl_c store-conditionals a 1, beq retries on a broken
 * reservation, and mb orders subsequent reads after acquisition.
 */
00335 static inline int
00336 MUTEX_SET(tsl_t *tsl) {
00337         register tsl_t *__l = tsl;
00338         register tsl_t __r;
00339         asm volatile(
00340                 "1:     ldl_l   %0,%2\n"
00341                 "       blbs    %0,2f\n"
00342                 "       or      $31,1,%0\n"
00343                 "       stl_c   %0,%1\n"
00344                 "       beq     %0,3f\n"
00345                 "       mb\n"
00346                 "       br      3f\n"
00347                 "2:     xor     %0,%0\n"
00348                 "3:"
00349                 : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
00350         return __r;
00351 }
00352 
00353 /*
00354  * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
00355  * might be necessary before unlocking
00356  */
00357 static inline int
00358 MUTEX_UNSET(tsl_t *tsl) {
00359         asm volatile("  mb\n");
00360         return *tsl = 0;
00361 }
00362 
00363 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00364 #endif
00365 #endif
00366 
00367 /*********************************************************************
00368  * Tru64/cc assembly.
00369  *********************************************************************/
00370 #ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY
00371 typedef volatile u_int32_t tsl_t;
00372 
00373 #define MUTEX_ALIGN     4
00374 
00375 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* Compiler builtins do the LL/SC work that the gcc/Alpha port hand-codes. */
00376 #include <alpha/builtins.h>
00377 #define MUTEX_SET(tsl)          (__LOCK_LONG_RETRY((tsl), 1) != 0)
00378 #define MUTEX_UNSET(tsl)        (__UNLOCK_LONG(tsl))
00379 
/* Unlock, then yield 0 via the comma operator (success). */
00380 #define MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
00381 #endif
00382 #endif
00383 
00384 /*********************************************************************
00385  * ARM/gcc assembly.
00386  *********************************************************************/
00387 #ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
00388 typedef unsigned char tsl_t;
00389 
00390 #ifdef LOAD_ACTUAL_MUTEX_CODE
00391 /* gcc/arm: 0 is clear, 1 is set. */
/*
 * SWPB atomically swaps a 1 into the lock byte and returns the old value;
 * EOR #1 inverts it so the expression is 1 when the lock was clear.
 * NOTE(review): SWP is deprecated on later ARM architectures (v6+) --
 * confirm the targets this port is built for.
 */
00392 #define MUTEX_SET(tsl) ({                                               \
00393         int __r;                                                        \
00394         asm volatile(                                                   \
00395                 "swpb   %0, %1, [%2]\n\t"                               \
00396                 "eor    %0, %0, #1\n\t"                                 \
00397             : "=&r" (__r)                                               \
00398             : "r" (1), "r" (tsl)                                        \
00399             );                                                          \
00400         __r & 1;                                                        \
00401 })
00402 
/* Release via a volatile store so the compiler cannot elide or reorder it. */
00403 #define MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
00404 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00405 #endif
00406 #endif
00407 
00408 /*********************************************************************
00409  * HPPA/gcc assembly.
00410  *********************************************************************/
00411 #ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
00412 typedef u_int32_t tsl_t;
00413 
00414 #define MUTEX_ALIGN     16
00415 
00416 #ifdef LOAD_ACTUAL_MUTEX_CODE
00417 /*
00418  * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
00419  * The 32-bit word used by that instruction must be 16-byte aligned.  We could
00420  * use the "aligned" attribute in GCC but that doesn't work for stack variables.
00421  */
/*
 * Inverted convention: ldcws loads the old value and clears the word, so a
 * nonzero old value (tested via "__r & 1") means we acquired the lock, and
 * "unlocked" is all-ones -- hence MUTEX_UNSET stores -1, not 0.
 */
00422 #define MUTEX_SET(tsl) ({                                               \
00423         register tsl_t *__l = (tsl);                                    \
00424         int __r;                                                        \
00425         asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l));        \
00426         __r & 1;                                                        \
00427 })
00428 
00429 #define MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = -1)
00430 #define MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
00431 #endif
00432 #endif
00433 
00434 /*********************************************************************
00435  * IA64/gcc assembly.
00436  *********************************************************************/
00437 #ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
00438 typedef volatile unsigned char tsl_t;
00439 
00440 #ifdef LOAD_ACTUAL_MUTEX_CODE
00441 /* gcc/ia64: 0 is clear, 1 is set. */
/*
 * xchg1 atomically swaps a 1 into the lock byte, returning the old value;
 * "__r ^ 1" is therefore 1 exactly when the lock was previously clear.
 */
00442 #define MUTEX_SET(tsl) ({                                               \
00443         register tsl_t *__l = (tsl);                                    \
00444         long __r;                                                       \
00445         asm volatile("xchg1 %0=%1,%2" :                                 \
00446                      "=r"(__r), "+m"(*__l) : "r"(1));                   \
00447         __r ^ 1;                                                        \
00448 })
00449 
00450 /*
00451  * Store through a "volatile" pointer so we get a store with "release"
00452  * semantics.
00453  */
00454 #define MUTEX_UNSET(tsl)        (*(tsl_t *)(tsl) = 0)
00455 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00456 #endif
00457 #endif
00458 
00459 /*********************************************************************
00460  * PowerPC/gcc assembly.
00461  *********************************************************************/
00462 #if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY)
00463 typedef u_int32_t tsl_t;
00464 
00465 #ifdef LOAD_ACTUAL_MUTEX_CODE
00466 /*
00467  * The PowerPC does a sort of pseudo-atomic locking.  You set up a
00468  * 'reservation' on a chunk of memory containing a mutex by loading the
00469  * mutex value with LWARX.  If the mutex has an 'unlocked' (arbitrary)
00470  * value, you then try storing into it with STWCX.  If no other process or
00471  * thread broke your 'reservation' by modifying the memory containing the
00472  * mutex, then the STCWX succeeds; otherwise it fails and you try to get
00473  * a reservation again.
00474  *
00475  * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
00476  * entire cache line, normally 32 bytes, aligned naturally.  If the mutex
00477  * lives near data that gets changed a lot, there's a chance that you'll
00478  * see more broken reservations than you might otherwise.  The only
00479  * situation in which this might be a problem is if one processor is
00480  * beating on a variable in the same cache block as the mutex while another
00481  * processor tries to acquire the mutex.  That's bad news regardless
00482  * because of the way it bashes caches, but if you can't guarantee that a
00483  * mutex will reside in a relatively quiescent cache line, you might
00484  * consider padding the mutex to force it to live in a cache line by
00485  * itself.  No, you aren't guaranteed that cache lines are 32 bytes.  Some
00486  * embedded processors use 16-byte cache lines, while some 64-bit
00487  * processors use 128-bit cache lines.  But assuming a 32-byte cache line
00488  * won't get you into trouble for now.
00489  *
00490  * If mutex locking is a bottleneck, then you can speed it up by adding a
00491  * regular LWZ load before the LWARX load, so that you can test for the
00492  * common case of a locked mutex without wasting cycles making a reservation.
00493  *
00494  * gcc/ppc: 0 is clear, 1 is set.
00495  */
/*
 * On success the pointer value itself (nonzero) is stored as the lock
 * word via "stwcx. %1,0,%1" and returned; on failure %1 is zeroed first.
 * NOTE(review): "return (int)tsl" truncates a pointer to int -- on 64-bit
 * PowerPC a pointer whose low 32 bits are zero would be misreported as
 * failure.  Confirm this port is 32-bit only.
 */
00496 static inline int
00497 MUTEX_SET(int *tsl)  {
00498         int __r;
00499         asm volatile (
00500 "0:                             \n\t"
00501 "       lwarx   %0,0,%1         \n\t"
00502 "       cmpwi   %0,0            \n\t"
00503 "       bne-    1f              \n\t"
00504 "       stwcx.  %1,0,%1         \n\t"
00505 "       isync                   \n\t"
00506 "       beq+    2f              \n\t"
00507 "       b       0b              \n\t"
00508 "1:                             \n\t"
00509 "       li      %1,0            \n\t"
00510 "2:                             \n\t"
00511          : "=&r" (__r), "+r" (tsl)
00512          :
00513          : "cr0", "memory");
00514          return (int)tsl;
00515 }
00516 
/* "sync" orders all prior stores before the releasing store of 0. */
00517 static inline int
00518 MUTEX_UNSET(tsl_t *tsl) {
00519          asm volatile("sync" : : : "memory");
00520          return *tsl = 0;
00521 }
00522 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00523 #endif
00524 #endif
00525 
00526 /*********************************************************************
00527  * OS/390 C
00528  *********************************************************************/
00529 #ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
00530 typedef int tsl_t;
00531 
00532 #ifdef LOAD_ACTUAL_MUTEX_CODE
00533 /*
00534  * cs() is declared in <stdlib.h> but is built in to the compiler.
00535  * Must use LANGLVL(EXTENDED) to get its declaration.
00536  */
/*
 * Compare-and-swap: succeeds (returns 0) only when the lock word still
 * equals "zero"; the "zero" variable itself is declared elsewhere.
 */
00537 #define MUTEX_SET(tsl)          (!cs(&zero, (tsl), 1))
00538 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00539 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00540 #endif
00541 #endif
00542 
00543 /*********************************************************************
00544  * S/390 32-bit assembly.
00545  *********************************************************************/
00546 #ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
00547 typedef int tsl_t;
00548 
00549 #ifdef LOAD_ACTUAL_MUTEX_CODE
00550 /* gcc/S390: 0 is clear, 1 is set. */
/*
 * CS loop: retries while the compare-and-swap reports "lock was held".
 * NOTE(review): the backslash line-continuations below are needless --
 * this is a function body, not a macro -- though harmless, since
 * backslash-newline is simply spliced away.
 */
00551 static inline int
00552 MUTEX_SET(tsl_t *tsl) {                                                 \
00553         register tsl_t *__l = (tsl);                                    \
00554         int __r;                                                        \
00555   asm volatile(                                                         \
00556        "    la    1,%1\n"                                               \
00557        "    lhi   0,1\n"                                                \
00558        "    l     %0,%1\n"                                              \
00559        "0:  cs    %0,0,0(1)\n"                                          \
00560        "    jl    0b"                                                   \
00561        : "=&d" (__r), "+m" (*__l)                                       \
00562        : : "0", "1", "cc");                                             \
00563         return !__r;                                                    \
00564 }
00565 
00566 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00567 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00568 #endif
00569 #endif
00570 
00571 /*********************************************************************
00572  * SCO/cc assembly.
00573  *********************************************************************/
00574 #ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
00575 typedef unsigned char tsl_t;
00576 
00577 #ifdef LOAD_ACTUAL_MUTEX_CODE
00578 /*
00579  * UnixWare has threads in libthread, but OpenServer doesn't (yet).
00580  *
00581  * cc/x86: 0 is clear, 1 is set.
00582  */
/*
 * USL-compiler "asm function": lock xchgb swaps a 1 into the byte, xorl
 * inverts the old value in %eax so the return is 1 when we acquired.
 * When !__USLC__ no _tsl_set is defined here; presumably another
 * translation unit supplies it -- verify against the build.
 */
00583 #if defined(__USLC__)
00584 asm int
00585 _tsl_set(void *tsl)
00586 {
00587 %mem tsl
00588         movl    tsl, %ecx
00589         movl    $1, %eax
00590         lock
00591         xchgb   (%ecx),%al
00592         xorl    $1,%eax
00593 }
00594 #endif
00595 
00596 #define MUTEX_SET(tsl)          _tsl_set(tsl)
00597 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00598 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00599 #endif
00600 #endif
00601 
00602 /*********************************************************************
00603  * Sparc/gcc assembly.
00604  *********************************************************************/
00605 #ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
00606 typedef unsigned char tsl_t;
00607 
00608 #ifdef LOAD_ACTUAL_MUTEX_CODE
00609 /*
00610  *
00611  * The ldstub instruction takes the location specified by its first argument
00612  * (a register containing a memory address) and loads its contents into its
00613  * second argument (a register) and atomically sets the contents the location
00614  * specified by its first argument to a byte of 1s.  (The value in the second
00615  * argument is never read, but only overwritten.)
00616  *
00617  * The stbar is needed for v8, and is implemented as membar #sync on v9,
00618  * so is functional there as well.  For v7, stbar may generate an illegal
00619  * instruction and we have no way to tell what we're running on.  Some
00620  * operating systems notice and skip this instruction in the fault handler.
00621  *
00622  * gcc/sparc: 0 is clear, 1 is set.
00623  */
/* "!__r": acquisition succeeded iff the byte was previously zero. */
00624 #define MUTEX_SET(tsl) ({                                               \
00625         register tsl_t *__l = (tsl);                                    \
00626         register tsl_t __r;                                             \
00627         __asm__ volatile                                                \
00628             ("ldstub [%1],%0; stbar"                                    \
00629             : "=r"( __r) : "r" (__l));                                  \
00630         !__r;                                                           \
00631 })
00632 
00633 #define MUTEX_UNSET(tsl)        (*(tsl) = 0)
00634 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00635 #endif
00636 #endif
00637 
00638 /*********************************************************************
00639  * UTS/cc assembly.
00640  *********************************************************************/
00641 #ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
00642 typedef int tsl_t;
00643 
00644 #ifdef LOAD_ACTUAL_MUTEX_CODE
/* uts_lock is supplied elsewhere (UTS assembly); no initialization needed. */
00645 #define MUTEX_INIT(x)   0
00646 #define MUTEX_SET(x)    (!uts_lock(x, 1))
00647 #define MUTEX_UNSET(x)  (*(x) = 0)
00648 #endif
00649 #endif
00650 
00651 /*********************************************************************
00652  * MIPS/gcc assembly.
00653  *********************************************************************/
00654 #ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
00655 typedef u_int32_t tsl_t;
00656 
00657 #define MUTEX_ALIGN     4
00658 
00659 #ifdef LOAD_ACTUAL_MUTEX_CODE
00660 /*
00661  * For gcc/MIPS.  Should return 0 if could not acquire the lock, 1 if
00662  * lock was acquired properly.
00663  */
/*
 * LL/SC loop: ll loads-linked; if already held, branch to the forward
 * "1:" label and return the xori'd value; otherwise sc attempts the
 * store-conditional and beql retries (via the backward "1:") on failure.
 * The duplicated "1:" labels are legal local labels resolved by the
 * assembler's 1f/1b direction suffixes.
 */
00664 static inline int
00665 MUTEX_SET(tsl_t *tsl) {
00666        register tsl_t *__l = tsl;
00667        register tsl_t __r;
00668        __asm__ __volatile__(
00669                "       .set push           \n"
00670                "       .set mips2          \n"
00671                "       .set noreorder      \n"
00672                "       .set nomacro        \n"
00673                "1:     ll      %0,%1       \n"
00674                "       bne     %0,$0,1f    \n"
00675                "       xori    %0,%0,1     \n"
00676                "       sc      %0,%1       \n"
00677                "       beql    %0,$0,1b    \n"
00678                "       xori    %0,1        \n"
00679                "1:     .set pop              "
00680                : "=&r" (__r), "+R" (*__l));
00681        return __r;
00682 }
00683 
00684 #define MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
00685 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00686 #endif
00687 #endif
00688 
00689 /*********************************************************************
00690  * x86/gcc assembly.
00691  *********************************************************************/
00692 #ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
00693 typedef unsigned char tsl_t;
00694 
00695 #ifdef LOAD_ACTUAL_MUTEX_CODE
00696 /* gcc/x86: 0 is clear, 1 is set. */
/*
 * lock xchgb swaps a 1 into the lock byte; xorl inverts the old value so
 * "__r & 1" is 1 when the lock was previously clear.
 * NOTE(review): the input constraint "m1" (memory OR match-operand-1)
 * looks suspect -- other ports use a plain matching "1" constraint;
 * confirm against the upstream source before touching.
 */
00697 #define MUTEX_SET(tsl) ({                                               \
00698         register tsl_t *__l = (tsl);                                    \
00699         int __r;                                                        \
00700         asm volatile("movl $1,%%eax\n"                                  \
00701                      "lock\n"                                           \
00702                      "xchgb %1,%%al\n"                                  \
00703                      "xorl $1,%%eax"                                    \
00704             : "=&a" (__r), "=m" (*__l)                                  \
00705             : "m1" (*__l)                                               \
00706             );                                                          \
00707         __r & 1;                                                        \
00708 })
00709 
00710 #define MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
00711 #define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00712 
00713 /*
00714  * From Intel's performance tuning documentation (and see SR #6975):
00715  * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
00716  *
00717  * "For this reason, it is highly recommended that you insert the PAUSE
00718  * instruction into all spin-wait code immediately. Using the PAUSE
00719  * instruction does not affect the correctness of programs on existing
00720  * platforms, and it improves performance on Pentium 4 processor platforms."
00721  */
00722 #define MUTEX_PAUSE             asm volatile ("rep; nop" : : );
00723 #endif
00724 #endif
00725 
/*********************************************************************
 * x86_64/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
typedef unsigned char tsl_t;            /* Lock word is a single byte. */

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/x86_64: 0 is clear, 1 is set. */
/*
 * Try to acquire the mutex (GCC statement-expression, yields an int):
 * load 1 into %rax, atomically exchange %al with the lock byte (XCHG
 * with a memory operand, plus an explicit LOCK prefix), then XOR the
 * result with 1.  __r & 1 is therefore nonzero iff the byte was
 * previously 0, i.e. iff we acquired the lock.
 *
 * NOTE(review): the input constraint "1m" (matching-digit combined
 * with "m") is unusual; the conventional spelling is "1" (*__l) alone,
 * as is the "m1" in the i386 variant above -- confirm both against the
 * gcc versions this tree supports.
 */
#define  MUTEX_SET(tsl) ({                                              \
        register tsl_t *__l = (tsl);                                    \
        int __r;                                                        \
        asm volatile("mov $1,%%rax\n"                                   \
                     "lock\n"                                           \
                     "xchgb %1,%%al\n"                                  \
                     "xor $1,%%rax"                                     \
                : "=&a" (__r), "=m" (*__l)                              \
                : "1m" (*__l)                                           \
                );                                                      \
        __r & 1;                                                        \
})
00746 
/*
 * Release/initialize the mutex: store zero into the lock byte.  Cast
 * through volatile so the compiler cannot defer or elide the store --
 * the lock word lives in shared memory and is polled by other threads
 * and processes.  This matches the x86 variant of MUTEX_UNSET above,
 * which already used a volatile store; the x86_64 version previously
 * performed a plain (non-volatile) assignment.
 */
#define MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
#define MUTEX_INIT(tsl)         MUTEX_UNSET(tsl)
00749 #endif
00750 #endif
00751 
/*
 * Mutex alignment defaults to sizeof(unsigned int).
 *
 * !!!
 * Various systems require different alignments for mutexes (the worst we've
 * seen so far is 16-bytes on some HP architectures).  Malloc(3) is assumed
 * to return reasonable alignment, all other mutex users must ensure proper
 * alignment locally.
 */
#ifndef MUTEX_ALIGN
#define MUTEX_ALIGN     sizeof(unsigned int)
#endif

/*
 * Mutex destruction defaults to a no-op.  Architecture sections above
 * that need real teardown define MUTEX_DESTROY before this point.
 */
#ifndef MUTEX_DESTROY
#define MUTEX_DESTROY(x)
#endif
00771 
/*
 * DB_MUTEXMGR --
 *      The mutex manager encapsulates the mutex system.  One per
 *      environment; individual mutexes are addressed as fixed-size
 *      slots off mutex_array (see MUTEXP_SET below).
 */
typedef struct __db_mutexmgr {
        /* These fields are never updated after creation, so not protected. */
        DB_ENV  *dbenv;                 /* Environment */
        REGINFO  reginfo;               /* Region information */

        void    *mutex_array;           /* Base of the mutex array */
} DB_MUTEXMGR;
00783 
/* Macros to lock/unlock the mutex region as a whole. */
/*
 * Both macros navigate from the environment handle to the region-wide
 * mutex: dbenv->mutex_handle is the DB_MUTEXMGR, whose reginfo.primary
 * points at the DB_MUTEXREGION holding mtx_region, which is then passed
 * to MUTEX_LOCK/MUTEX_UNLOCK (defined elsewhere).
 */
#define MUTEX_SYSTEM_LOCK(dbenv)                                        \
        MUTEX_LOCK(dbenv, ((DB_MUTEXREGION *)((DB_MUTEXMGR *)           \
            (dbenv)->mutex_handle)->reginfo.primary)->mtx_region)
#define MUTEX_SYSTEM_UNLOCK(dbenv)                                      \
        MUTEX_UNLOCK(dbenv, ((DB_MUTEXREGION *)((DB_MUTEXMGR *)         \
            (dbenv)->mutex_handle)->reginfo.primary)->mtx_region)
00791 
/*
 * DB_MUTEXREGION --
 *      The primary mutex data structure in the shared memory region.
 */
typedef struct __db_mutexregion {
        /* These fields are initialized at create time and never modified. */
        roff_t          mutex_offset;   /* Offset of mutex array */
        size_t          mutex_size;     /* Size of the aligned mutex;
                                         * the slot stride used by
                                         * MUTEXP_SET. */
        roff_t          thread_off;     /* Offset of the thread area. */

        db_mutex_t      mtx_region;     /* Region mutex; taken/dropped by
                                         * MUTEX_SYSTEM_LOCK/UNLOCK. */

        /* Protected using the region mutex. */
        u_int32_t       mutex_next;     /* Next free mutex */

        DB_MUTEX_STAT   stat;           /* Mutex statistics */
} DB_MUTEXREGION;
00809 
/*
 * DB_MUTEX --
 *      A single mutex slot in the shared mutex array.  Its contents
 *      depend on the configured mutex implementation: MUTEX_FIELDS
 *      (e.g. a pthread mutex + condition variable under
 *      HAVE_MUTEX_PTHREADS) or a test-and-set word; under
 *      HAVE_MUTEX_FCNTL neither is present.
 */
typedef struct __mutex_t {              /* Mutex. */
#ifdef MUTEX_FIELDS
        MUTEX_FIELDS
#endif
#if !defined(MUTEX_FIELDS) && !defined(HAVE_MUTEX_FCNTL)
        tsl_t           tas;            /* Test and set. */
#endif
        pid_t           pid;            /* Process owning mutex */
        db_threadid_t   tid;            /* Thread owning mutex */

        u_int32_t mutex_next_link;      /* Linked list of free mutexes. */

#ifdef HAVE_STATISTICS
        int       alloc_id;             /* Allocation ID. */

        u_int32_t mutex_set_wait;       /* Granted after wait. */
        u_int32_t mutex_set_nowait;     /* Granted without waiting. */
#endif

        /*
         * A subset of the flag arguments for __mutex_alloc().
         *
         * Flags should be an unsigned integer even if it's not required by
         * the possible flags values, getting a single byte on some machines
         * is expensive, and the mutex structure is a MP hot spot.
         */
        u_int32_t flags;                /* MUTEX_XXX */
} DB_MUTEX;
00838 
/*
 * Macro to get a reference to a specific mutex: mutexes live in a
 * contiguous array of mutex_size-byte slots, so mutex "indx" is
 * indx * mutex_size bytes past the base of the array.  Relies on
 * variables mtxmgr (DB_MUTEXMGR *) and mtxregion (DB_MUTEXREGION *)
 * being in scope at the point of use.
 *
 * The expansion is fully parenthesized and carries no trailing
 * semicolon (unlike the previous version), so it can be used safely as
 * a sub-expression, e.g. as a function argument or in a conditional.
 */
#define MUTEXP_SET(indx)                                                \
        ((DB_MUTEX *)                                                   \
            ((u_int8_t *)mtxmgr->mutex_array + (indx) * mtxregion->mutex_size))
00843 
00844 #endif /* !_DB_MUTEX_INT_H_ */

Generated on Sun Dec 25 12:14:22 2005 for Berkeley DB 4.4.16 by  doxygen 1.4.2