RTBKit 0.9
Open-source framework to create real-time ad bidding systems.
00001 #ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED 00002 # define JSONCPP_BATCHALLOCATOR_H_INCLUDED 00003 00004 # include <stdlib.h> 00005 00006 # ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION 00007 00008 namespace Json { 00009 00010 /* Fast memory allocator. 00011 * 00012 * This memory allocator allocates memory for a batch of object (specified by 00013 * the page size, the number of object in each page). 00014 * 00015 * It does not allow the destruction of a single object. All the allocated objects 00016 * can be destroyed at once. The memory can be either released or reused for future 00017 * allocation. 00018 * 00019 * The in-place new operator must be used to construct the object using the pointer 00020 * returned by allocate. 00021 */ 00022 template<typename AllocatedType 00023 ,const unsigned int objectPerAllocation> 00024 class BatchAllocator 00025 { 00026 public: 00027 typedef AllocatedType Type; 00028 00029 BatchAllocator( unsigned int objectsPerPage = 255 ) 00030 : freeHead_( 0 ) 00031 , objectsPerPage_( objectsPerPage ) 00032 { 00033 // printf( "Size: %d => %s\n", sizeof(AllocatedType), typeid(AllocatedType).name() ); 00034 JSON_ASSERT( sizeof(AllocatedType) * objectPerAllocation >= sizeof(AllocatedType *) ); // We must be able to store a slist in the object free space. 00035 JSON_ASSERT( objectsPerPage >= 16 ); 00036 batches_ = allocateBatch( 0 ); // allocated a dummy page 00037 currentBatch_ = batches_; 00038 } 00039 00040 ~BatchAllocator() 00041 { 00042 for ( BatchInfo *batch = batches_; batch; ) 00043 { 00044 BatchInfo *nextBatch = batch->next_; 00045 free( batch ); 00046 batch = nextBatch; 00047 } 00048 } 00049 00052 AllocatedType *allocate() 00053 { 00054 if ( freeHead_ ) // returns node from free list. 
00055 { 00056 AllocatedType *object = freeHead_; 00057 freeHead_ = *(AllocatedType **)object; 00058 return object; 00059 } 00060 if ( currentBatch_->used_ == currentBatch_->end_ ) 00061 { 00062 currentBatch_ = currentBatch_->next_; 00063 while ( currentBatch_ && currentBatch_->used_ == currentBatch_->end_ ) 00064 currentBatch_ = currentBatch_->next_; 00065 00066 if ( !currentBatch_ ) // no free batch found, allocate a new one 00067 { 00068 currentBatch_ = allocateBatch( objectsPerPage_ ); 00069 currentBatch_->next_ = batches_; // insert at the head of the list 00070 batches_ = currentBatch_; 00071 } 00072 } 00073 AllocatedType *allocated = currentBatch_->used_; 00074 currentBatch_->used_ += objectPerAllocation; 00075 return allocated; 00076 } 00077 00080 void release( AllocatedType *object ) 00081 { 00082 JSON_ASSERT( object != 0 ); 00083 *(AllocatedType **)object = freeHead_; 00084 freeHead_ = object; 00085 } 00086 00087 private: 00088 struct BatchInfo 00089 { 00090 BatchInfo *next_; 00091 AllocatedType *used_; 00092 AllocatedType *end_; 00093 AllocatedType buffer_[objectPerAllocation]; 00094 }; 00095 00096 // disabled copy constructor and assignement operator. 
00097 BatchAllocator( const BatchAllocator & ); 00098 void operator =( const BatchAllocator &); 00099 00100 static BatchInfo *allocateBatch( unsigned int objectsPerPage ) 00101 { 00102 const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType)* objectPerAllocation 00103 + sizeof(AllocatedType) * objectPerAllocation * objectsPerPage; 00104 BatchInfo *batch = static_cast<BatchInfo*>( malloc( mallocSize ) ); 00105 batch->next_ = 0; 00106 batch->used_ = batch->buffer_; 00107 batch->end_ = batch->buffer_ + objectsPerPage; 00108 return batch; 00109 } 00110 00111 BatchInfo *batches_; 00112 BatchInfo *currentBatch_; 00114 AllocatedType *freeHead_; 00115 unsigned int objectsPerPage_; 00116 }; 00117 00118 00119 } // namespace Json 00120 00121 # endif // ifndef JSONCPP_DOC_INCLUDE_IMPLEMENTATION 00122 00123 #endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED 00124