RTBKit 0.9
Open-source framework for building real-time ad-bidding systems.
soa/gc/testing/gc_test.cc
/* gc_test.cc
   Jeremy Barnes, 23 February 2010
   Copyright (c) 2010 Datacratic.  All rights reserved.

   Test of the garbage collector locking.
*/
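
// The tests below all exercise the same reader/writer pattern.  A minimal
// sketch of it, using only the GcLock calls that appear in this file (see
// gc_lock.h for the full API):
//
//     GcLock gc;
//     {
//         GcLock::SharedGuard guard(gc);      // reader-side critical section
//         // ... read RCU-protected structures ...
//     }                                       // reader leaves the section
//     gc.defer([=] { /* free old value */ }); // deferred: runs once all
//                                             // readers have exited
//     gc.visibleBarrier();                    // or: block until they have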

#define BOOST_TEST_MAIN
#define BOOST_TEST_DYN_LINK

#include "soa/gc/gc_lock.h"
#include "soa/gc/rcu_lock.h"
#include "jml/utils/string_functions.h"
#include "jml/utils/exc_assert.h"
#include "jml/utils/guard.h"
#include "jml/arch/atomic_ops.h"
#include "jml/arch/thread_specific.h"
#include "jml/arch/rwlock.h"
#include "jml/arch/spinlock.h"
#include "jml/arch/tick_counter.h"
#include <boost/test/unit_test.hpp>
#include <boost/bind.hpp>
#include <iostream>

#include <boost/thread.hpp>
#include <boost/thread/barrier.hpp>


using namespace ML;
using namespace Datacratic;
using namespace std;

// Defined in gc_lock.cc
namespace Datacratic {
extern int32_t gcLockStartingEpoch;
} // namespace Datacratic

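// Global initialiser: sets up the RCU library before any test runs.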
struct doInit {
    doInit()
    {
        rcu_init();
    }
} init;

#if 1

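// Basic deferred-execution contract: a callback registered with defer()
// while the shared lock is held is guaranteed to have run by the time
// unlockShared() returns.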
BOOST_AUTO_TEST_CASE ( test_gc )
{
    GcLock gc;
    gc.lockShared();

    BOOST_CHECK(gc.isLockedShared());

    bool deferred = false;

    cerr << endl << "before defer" << endl;
    gc.dump();

    gc.defer([&] () { deferred = true; memory_barrier(); });

    cerr << endl << "after defer" << endl;
    gc.dump();

    gc.unlockShared();

    cerr << endl << "after unlock shared" << endl;
    gc.dump();

    BOOST_CHECK(!gc.isLockedShared());
    BOOST_CHECK(deferred);
}

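// Stress-tests the guard invariants: any number of SharedGuards may be held
// concurrently, but an ExclusiveGuard must never overlap another guard of
// either kind.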
BOOST_AUTO_TEST_CASE(test_mutual_exclusion)
{
    cerr << "testing mutual exclusion" << endl;

    GcLock lock;
    volatile bool finished = false;
    volatile int numExclusive = 0;
    volatile int numShared = 0;
    int errors = 0;
    int multiShared = 0;
    uint64_t sharedIterations = 0;
    uint64_t exclusiveIterations = 0;

    auto sharedThread = [&] ()
        {
            while (!finished) {
                GcLock::SharedGuard guard(lock);
                ML::atomic_inc(numShared);

                if (numExclusive > 0) {
                    cerr << "exclusive and shared" << endl;
                    ML::atomic_inc(errors);
                }
                if (numShared > 1) {
                    ML::atomic_inc(multiShared);
                }

                ML::atomic_dec(numShared);
                ML::atomic_inc(sharedIterations);
                ML::memory_barrier();
            }
        };

    auto exclusiveThread = [&] ()
        {
            while (!finished) {
                GcLock::ExclusiveGuard guard(lock);
                ML::atomic_inc(numExclusive);

                if (numExclusive > 1) {
                    cerr << "more than one exclusive" << endl;
                    ML::atomic_inc(errors);
                }
                if (numShared > 0) {
                    cerr << "exclusive and shared" << endl;
                    ML::atomic_inc(errors);
                }

                ML::atomic_dec(numExclusive);
                ML::atomic_inc(exclusiveIterations);
                ML::memory_barrier();
            }
        };

    lock.getEntry();

    int nthreads = 4;

    {
        cerr << "single shared" << endl;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        tg.create_thread(sharedThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "multi shared" << endl;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(sharedThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        if (nthreads > 1)
            BOOST_CHECK_GT(multiShared, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "single exclusive" << endl;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        tg.create_thread(exclusiveThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "multi exclusive" << endl;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(exclusiveThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "mixed shared and exclusive" << endl;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(sharedThread);
        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(exclusiveThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        if (nthreads > 1)
            BOOST_CHECK_GT(multiShared, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

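    // The remaining blocks re-run the single-shared case with the starting
    // epoch forced close to 32-bit wrap points: the unsigned wraparound
    // (0xFFFFFFF0), the signed INT_MAX -> INT_MIN boundary (0x7FFFFFF0) and
    // an intermediate value (0xBFFFFFF0), to check that epoch comparisons
    // survive rollover.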
    {
        cerr << "overflow" << endl;
        gcLockStartingEpoch = 0xFFFFFFF0;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        tg.create_thread(sharedThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "INT_MIN to INT_MAX" << endl;
        gcLockStartingEpoch = 0x7FFFFFF0;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        tg.create_thread(sharedThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

    {
        cerr << "benign overflow" << endl;
        gcLockStartingEpoch = 0xBFFFFFF0;
        sharedIterations = exclusiveIterations = multiShared = finished = 0;
        boost::thread_group tg;
        tg.create_thread(sharedThread);
        sleep(1);
        finished = true;
        tg.join_all();
        BOOST_CHECK_EQUAL(errors, 0);
        cerr << "iterations: shared " << sharedIterations
             << " exclusive " << exclusiveIterations << endl;
        cerr << "multiShared = " << multiShared << endl;
    }

}

#endif

#define USE_MALLOC 1

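/* Instrumented allocator used by the stress tests below.  It counts
   allocations and deallocations and tracks the high-water mark of live
   blocks; every freed block is reset to the default value so that a reader
   still holding a pointer to it can detect premature reuse.  With USE_MALLOC
   set it wraps plain new/delete; otherwise it hands out blocks from a fixed,
   spinlock-protected pool. */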
template<typename T>
struct Allocator {
    Allocator(int nblocks, T def = T())
        : def(def)
    {
        init(nblocks);
        highestAlloc = nallocs = ndeallocs = 0;
    }

    ~Allocator()
    {
#if ( ! USE_MALLOC )
        delete[] blocks;
        delete[] free;
#endif
    }

    T def;
    T * blocks;
    int * free;
    int nfree;
    int highestAlloc;
    int nallocs;
    int ndeallocs;
    ML::Spinlock lock;

    void init(int nblocks)
    {
#if ( ! USE_MALLOC )
        blocks = new T[nblocks];
        free = new int[nblocks];

        std::fill(blocks, blocks + nblocks, def);

        nfree = 0;
        for (int i = nblocks - 1;  i >= 0;  --i)
            free[nfree++] = i;
#endif
    }

    T * alloc()
    {
#if USE_MALLOC
        ML::atomic_inc(nallocs);
        ML::atomic_max(highestAlloc, nallocs - ndeallocs);
        return new T(def);
#else
        boost::lock_guard<ML::Spinlock> guard(lock);
        if (nfree == 0)
            throw ML::Exception("none free");
        int i = free[nfree - 1];
        highestAlloc = std::max(highestAlloc, i);
        T * result = blocks + i;
        --nfree;
        ++nallocs;
        return result;
#endif
    }

    void dealloc(T * value)
    {
        if (!value) return;
        *value = def;
#if USE_MALLOC
        delete value;
        ML::atomic_inc(ndeallocs);
        return;
#else
        boost::lock_guard<ML::Spinlock> guard(lock);
        int i = value - blocks;
        free[nfree++] = i;
        ++ndeallocs;
#endif
    }

    static void doDealloc(void * thisptr, void * blockPtr_, void * blockVar_)
    {
        int * & blockVar = *reinterpret_cast<int **>(blockVar_);
        int * blockPtr = reinterpret_cast<int *>(blockPtr_);
        ExcAssertNotEqual(blockVar, blockPtr);
        //blockVar = 0;
        //ML::memory_barrier();
        //cerr << "blockPtr = " << blockPtr << endl;
        reinterpret_cast<Allocator *>(thisptr)->dealloc(blockPtr);
    }

    static void doDeallocAll(void * thisptr, void * blocksPtr_, void * numBlocks_)
    {
        size_t numBlocks = reinterpret_cast<size_t>(numBlocks_);
        int ** blocksPtr = reinterpret_cast<int **>(blocksPtr_);
        Allocator * alloc = reinterpret_cast<Allocator *>(thisptr);

        for (unsigned i = 0;  i != numBlocks;  ++i) {
            if (blocksPtr[i])
                alloc->dealloc(blocksPtr[i]);
        }

        delete[] blocksPtr;
    }
};

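/* Generic stress harness, parameterised on the lock type.  Writer threads
   continually replace the published blocks with freshly allocated ones
   stamped with their thread number; reader threads walk allBlocks and count
   an error whenever a block's contents do not match its owning thread,
   i.e. whenever a block was reclaimed while still visible. */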
template<typename Lock>
struct TestBase {
    TestBase(int nthreads, int nblocks, int nSpinThreads = 0)
        : finished(false),
          nthreads(nthreads),
          nblocks(nblocks),
          nSpinThreads(nSpinThreads),
          allocator(1024 * 1024, -1),
          nerrors(0),
          allBlocks(nthreads)
    {
        for (unsigned i = 0;  i < nthreads;  ++i) {
            allBlocks[i] = new int *[nblocks];
            std::fill(allBlocks[i], allBlocks[i] + nblocks, (int *)0);
        }
    }

    ~TestBase()
    {
        for (unsigned i = 0;  i < nthreads;  ++i)
            delete[] allBlocks[i];
    }

    volatile bool finished;
    int nthreads;
    int nblocks;
    int nSpinThreads;
    Allocator<int> allocator;
    Lock gc;
    uint64_t nerrors;

    /* All of the blocks are published here.  Any pointer which is read from
       here by another thread should always refer to exactly the same
       value.
    */
    vector<int **> allBlocks;

    void checkVisible(int threadNum, unsigned long long start)
    {
        // We're reading from someone else's pointers, so we need to lock here
        gc.enterCS();
        //gc.lockShared();

        for (unsigned i = 0;  i < nthreads;  ++i) {
            for (unsigned j = 0;  j < nblocks;  ++j) {
                //int * val = allBlocks[i][j];
                int * val = (int *)(rcu_dereference_sym(allBlocks[i][j]));
                if (val) {
                    int atVal = *val;
                    if (atVal != i) {
                        cerr << ML::format("%.6f thread %d: invalid value read "
                                "from thread %d block %d: %d\n",
                                (ticks() - start) / ticks_per_second, threadNum,
                                i, j, atVal);
                        ML::atomic_inc(nerrors);
                        //abort();
                    }
                }
            }
        }

        gc.exitCS();
        //gc.unlockShared();
    }

    void doReadThread(int threadNum)
    {
        gc.getEntry();
        unsigned long long start = ticks();
        while (!finished) {
            checkVisible(threadNum, start);
        }
    }

    void doSpinThread()
    {
        while (!finished) {
        }
    }

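    /* Writer using the deferred-reclamation path: each pass publishes a fresh
       set of blocks and hands the old ones to gc.defer(), which frees them
       only once no reader can still hold a reference. */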
    void allocThreadDefer(int threadNum)
    {
        gc.getEntry();
        try {
            uint64_t nErrors = 0;

            int ** blocks = allBlocks[threadNum];

            while (!finished) {

                int ** oldBlocks = new int * [nblocks];

                //gc.enterCS();

                for (unsigned i = 0;  i < nblocks;  ++i) {
                    int * block = allocator.alloc();
                    if (*block != -1) {
                        cerr << "old block was allocated" << endl;
                        ++nErrors;
                    }
                    *block = threadNum;
                    ML::memory_barrier();
                    //rcu_set_pointer_sym((void **)&blocks[i], block);
                    int * oldBlock = blocks[i];
                    blocks[i] = block;
                    ML::memory_barrier();
                    oldBlocks[i] = oldBlock;
                }

                gc.defer(Allocator<int>::doDeallocAll, &allocator, oldBlocks,
                         (void *)(size_t)nblocks);

                //gc.exitCS();
            }


            int * oldBlocks[nblocks];

            for (unsigned i = 0;  i < nblocks;  ++i) {
                oldBlocks[i] = blocks[i];
                blocks[i] = 0;
            }

            gc.visibleBarrier();

            //cerr << "at end" << endl;

            for (unsigned i = 0;  i < nblocks;  ++i)
                allocator.dealloc(oldBlocks[i]);

            //cerr << "nErrors = " << nErrors << endl;
        } catch (...) {
            static ML::Spinlock lock;
            lock.acquire();
            //cerr << "threadnum " << threadNum << " inEpoch "
            //     << gc.getEntry().inEpoch << endl;
            gc.dump();
            abort();
        }
    }

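    /* Writer using the synchronous path: after unlinking the old blocks it
       calls gc.visibleBarrier() to wait until no reader can still see them,
       scribbles 1234 into each one (a reader would report this as an error
       if the barrier were broken), and then frees them itself. */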
    void allocThreadSync(int threadNum)
    {
        gc.getEntry();
        try {
            uint64_t nErrors = 0;

            int ** blocks = allBlocks[threadNum];
            int * oldBlocks[nblocks];

            while (!finished) {

                for (unsigned i = 0;  i < nblocks;  ++i) {
                    int * block = allocator.alloc();
                    if (*block != -1) {
                        cerr << "old block was allocated" << endl;
                        ++nErrors;
                    }
                    *block = threadNum;
                    int * oldBlock = blocks[i];
                    blocks[i] = block;
                    oldBlocks[i] = oldBlock;
                }

                ML::memory_barrier();
                gc.visibleBarrier();

                for (unsigned i = 0;  i < nblocks;  ++i)
                    if (oldBlocks[i]) *oldBlocks[i] = 1234;

                for (unsigned i = 0;  i < nblocks;  ++i)
                    if (oldBlocks[i]) allocator.dealloc(oldBlocks[i]);
            }

            for (unsigned i = 0;  i < nblocks;  ++i) {
                oldBlocks[i] = blocks[i];
                blocks[i] = 0;
            }

            gc.visibleBarrier();

            for (unsigned i = 0;  i < nblocks;  ++i)
                allocator.dealloc(oldBlocks[i]);

            //cerr << "nErrors = " << nErrors << endl;
        } catch (...) {
            static ML::Spinlock lock;
            lock.acquire();
            //cerr << "threadnum " << threadNum << " inEpoch "
            //     << gc.getEntry().inEpoch << endl;
            gc.dump();
            abort();
        }
    }

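    /* Spawns nthreads reader threads, nthreads writer threads running
       allocFn, and nSpinThreads busy-loop threads; lets them run for runTime
       seconds, then checks that every allocation was matched by a
       deallocation and that no reader observed a stale value. */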
    void run(boost::function<void (int)> allocFn,
             int runTime = 1)
    {
        gc.getEntry();
        boost::thread_group tg;

        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(boost::bind<void>(&TestBase::doReadThread, this, i));

        for (unsigned i = 0;  i < nthreads;  ++i)
            tg.create_thread(boost::bind<void>(allocFn, i));

        for (unsigned i = 0;  i < nSpinThreads;  ++i)
            tg.create_thread(boost::bind<void>(&TestBase::doSpinThread, this));

        sleep(runTime);

        finished = true;

        tg.join_all();

        gc.deferBarrier();

        gc.dump();

        BOOST_CHECK_EQUAL(allocator.nallocs, allocator.ndeallocs);
        BOOST_CHECK_EQUAL(nerrors, 0);

        cerr << "allocs " << allocator.nallocs
             << " deallocs " << allocator.ndeallocs << endl;
        cerr << "highest " << allocator.highestAlloc << endl;

        cerr << "gc.currentEpoch() = " << gc.currentEpoch() << endl;
    }
};

#if 1
BOOST_AUTO_TEST_CASE ( test_gc_sync_many_threads_contention )
{
    cerr << "testing contended synchronized GcLock with many threads" << endl;

    int nthreads = 8;
    int nSpinThreads = 16;
    int nblocks = 2;

    TestBase<GcLock> test(nthreads, nblocks, nSpinThreads);
    test.run(boost::bind(&TestBase<GcLock>::allocThreadSync, &test, _1));
}
#endif

BOOST_AUTO_TEST_CASE ( test_gc_deferred_contention )
{
    cerr << "testing contended deferred GcLock" << endl;

    int nthreads = 8;
    int nSpinThreads = 0;//16;
    int nblocks = 2;

    TestBase<GcLock> test(nthreads, nblocks, nSpinThreads);
    test.run(boost::bind(&TestBase<GcLock>::allocThreadDefer, &test, _1));
}


#if 1

BOOST_AUTO_TEST_CASE ( test_gc_sync )
{
    cerr << "testing synchronized GcLock" << endl;

    int nthreads = 2;
    int nblocks = 2;

    TestBase<GcLock> test(nthreads, nblocks);
    test.run(boost::bind(&TestBase<GcLock>::allocThreadSync, &test, _1));
}

BOOST_AUTO_TEST_CASE ( test_gc_sync_many_threads )
{
    cerr << "testing synchronized GcLock with many threads" << endl;

    int nthreads = 8;
    int nblocks = 2;

    TestBase<GcLock> test(nthreads, nblocks);
    test.run(boost::bind(&TestBase<GcLock>::allocThreadSync, &test, _1));
}

BOOST_AUTO_TEST_CASE ( test_rcu_sync )
{
    cerr << "testing synchronized RCU" << endl;

    int nthreads = 2;
    int nblocks = 2;

    TestBase<RcuLock> test(nthreads, nblocks);
    test.run(boost::bind(&TestBase<RcuLock>::allocThreadSync, &test, _1));
}

BOOST_AUTO_TEST_CASE ( test_gc_deferred )
{
    cerr << "testing deferred GcLock" << endl;

    int nthreads = 2;
    int nblocks = 2;

    TestBase<GcLock> test(nthreads, nblocks);
    test.run(boost::bind(&TestBase<GcLock>::allocThreadDefer, &test, _1));
}

BOOST_AUTO_TEST_CASE ( test_rcu_deferred )
{
    cerr << "testing deferred RCU" << endl;

    int nthreads = 2;
    int nblocks = 2;

    TestBase<RcuLock> test(nthreads, nblocks);
    test.run(boost::bind(&TestBase<RcuLock>::allocThreadDefer, &test, _1));
}


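/* TestBase default-constructs its lock member, so this proxy gives
   SharedGcLock a default constructor that opens the shared lock previously
   created under the well-known name. */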
struct SharedGcLockProxy : public SharedGcLock {
    static const char* name;
    SharedGcLockProxy() :
        SharedGcLock(GC_OPEN, name)
    {}
};
const char* SharedGcLockProxy::name = "gc_test.dat";

BOOST_AUTO_TEST_CASE( test_shared_lock_sync )
{
    cerr << "testing contended synchronized GcLock with shared lock" << endl;

    SharedGcLock lockGuard(GC_CREATE, SharedGcLockProxy::name);
    Call_Guard unlink_guard([&] { lockGuard.unlink(); });

    int nthreads = 8;
    int nSpinThreads = 16;
    int nblocks = 2;

    TestBase<SharedGcLockProxy> test(nthreads, nblocks, nSpinThreads);
    test.run(boost::bind(
                    &TestBase<SharedGcLockProxy>::allocThreadSync, &test, _1));
}

BOOST_AUTO_TEST_CASE( test_shared_lock_defer )
{
    cerr << "testing contended deferred GcLock with shared lock" << endl;

    SharedGcLock lockGuard(GC_CREATE, SharedGcLockProxy::name);
    Call_Guard unlink_guard([&] { lockGuard.unlink(); });

    int nthreads = 8;
    int nSpinThreads = 16;
    int nblocks = 2;

    TestBase<SharedGcLockProxy> test(nthreads, nblocks, nSpinThreads);
    test.run(boost::bind(
                    &TestBase<SharedGcLockProxy>::allocThreadDefer, &test, _1));
}

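// Tries to provoke races in deferBarrier(): the worker threads rendezvous,
// all call deferBarrier() at once, then rendezvous again before repeating.
// Note that nthreads is 0 below, so no workers are created and the body is
// effectively disabled.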
BOOST_AUTO_TEST_CASE ( test_defer_race )
{
    cerr << "testing defer race" << endl;
    GcLock gc;

    boost::thread_group tg;

    volatile bool finished = false;

    int nthreads = 0;

    volatile int numStarted = 0;

    auto doTestThread = [&] ()
        {
            while (!finished) {
                ML::atomic_inc(numStarted);
                while (numStarted != nthreads) ;

                gc.deferBarrier();

                ML::atomic_dec(numStarted);
                while (numStarted != 0) ;
            }
        };


    for (unsigned i = 0;  i < nthreads;  ++i)
        tg.create_thread(doTestThread);

    int runTime = 1;

    sleep(runTime);

    finished = true;

    tg.join_all();
}

#endif