/*
 * poolallocator.h
 *
 * Copyright (C) 2001 Atomic Blue ([email protected], http://www.atomicblue.org)
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation (version 2 of the License)
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * A template class used to allocate fixed size objects in pools.
 *
 */

#ifndef PS_POOLALLOCATOR_H
#define PS_POOLALLOCATOR_H

// Define this to enable testing to be sure a Release()'d object came from the pool to begin with (debug mode only)
//#define POOLALLOC_DEBUG_DELETE

#include <csutil/threading/thread.h>

/* Design notes:
 *
 * This template is designed to be integrated into a class definition.
 * Override the new and delete operators for the class to call CallFromNew() and
 * CallFromDelete() respectively.
 * Add a private static member variable to the class declaration like so:
 *   static PoolAllocator<myclass> mypool;
 *
 * Add the definition (usually in the .cpp file):
 *   PoolAllocator<myclass> myclass::mypool(number of objects to allocate per "step");
 *
 * Use new and delete as normal throughout the program.
 * (An illustrative sketch of this integration follows this comment block.)
 *
 * DO NOT DERIVE FROM A CLASS USING A POOL ALLOCATOR UNLESS YOU DO THE FOLLOWING:
 * 1) Create a new static PoolAllocator<> member for the derived class.
 * 2) Implement the new and delete operators as above using the new PoolAllocator<> member.
 *
 * Benefits:
 * Pool allocation using this method is very fast. It usually involves a test, an assignment
 * and a return. No mutex locking is needed since this is not an attempt at being thread safe.
 *
 * Limitations:
 * 1) Each pool operates on fixed-size objects. These have to be at least the size of struct
 *    st_freelist_member (currently a single pointer, which is 4 bytes on 32-bit platforms),
 *    or the size of each object is bumped up to this size (wasting memory).
 * 2) Memory is never returned to the OS until the entire pool is destructed. In the suggested
 *    implementation this is at program exit.
 * 3) Allocation time is not constant (it isn't on the heap either). When the pool empties, more
 *    space is allocated from the heap and the pool is refilled. The larger the allocation step
 *    size (specified as a parameter to the constructor), the longer this process takes, but the
 *    less often it has to be done.
 * 4) THIS IS NOT THREADSAFE!! To make this threadsafe the ideal solution would be atomic
 *    operations (there is a nice paper on using atomic ops to safely handle linked lists
 *    out there). An alternative is to mutex most calls - which further reduces the benefit
 *    over heap allocation.
 *
 * - A pool size of 3 should be about as efficient as normal heap allocation functions
 *
 */
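/* Illustrative sketch only (not part of the original header): one plausible way to wire a
 * hypothetical class "MyClass" to this allocator, following the steps in the design notes
 * above. The class name, file names and step size are assumptions made for the example.
 *
 *   // myclass.h (hypothetical)
 *   class MyClass
 *   {
 *   public:
 *       void *operator new(size_t)         { return mypool.CallFromNew(); }
 *       void operator delete(void *object) { mypool.CallFromDelete((MyClass *)object); }
 *   private:
 *       static PoolAllocator<MyClass> mypool;
 *       int somedata;
 *   };
 *
 *   // myclass.cpp (hypothetical)
 *   PoolAllocator<MyClass> MyClass::mypool(1024); // objects allocated per "step"
 */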

#define POOLALLOC_DEFAULT_ALLOCATION_OBJECTS 1024


template <class TEMPLATE_CLASS>
class PoolAllocator
{
    /// Mutex used only to assert on contention; this allocator is not thread safe.
    CS::Threading::Mutex mutex;

    /// Header placed at the start of each pool allocated from the heap.
    struct st_pool_entry
    {
        struct st_pool_entry *next;
    };

    /// Overlay used to link unused blocks into the free list.
    struct st_freelist_member
    {
        struct st_freelist_member *next;
    };


    /// Head of the list of free blocks.
    struct st_freelist_member *freelist_head;
    /// Head of the list of pools allocated from the heap.
    struct st_pool_entry *poollist_head;
    /// Number of objects to allocate from the heap each time the free list empties.
    uint32 allocation_step_itemcount;

public:
    /// Construct an empty allocator. No heap space is reserved until the first allocation.
    PoolAllocator(int items_per_pool=POOLALLOC_DEFAULT_ALLOCATION_OBJECTS)
    {
        allocation_step_itemcount = items_per_pool;
        freelist_head = NULL;
        poollist_head = NULL;
    }

    /// Release all pools back to the heap. Any outstanding objects become invalid.
    ~PoolAllocator()
    {
        struct st_pool_entry *pool=poollist_head,*nextpool;

        // Step through each pool, freeing the memory associated
        while (pool)
        {
            nextpool=pool->next;
            free(pool);
            pool=nextpool;
        }
        poollist_head=NULL;
        freelist_head=NULL;
    }


    /// Hand out a block from the free list; called from the client class's operator new.
    TEMPLATE_CLASS *CallFromNew()
    {
        CS_ASSERT_MSG("Contention detected in PoolAllocator new", mutex.TryLock());
        TEMPLATE_CLASS *objptr;

        // Allocate another pool if no free blocks are available
        if (!freelist_head)
            AllocNewPool();

        // Test for new pool allocation failure
        if (!freelist_head)
        {
            mutex.Unlock();
            return (TEMPLATE_CLASS *)NULL;
        }

        // Bump a block off the free list and return it
        objptr=(TEMPLATE_CLASS *)freelist_head;
        freelist_head=freelist_head->next;

        mutex.Unlock();
        return objptr;
    }

    /// Return a block to the free list; called from the client class's operator delete.
    void CallFromDelete(TEMPLATE_CLASS *obj)
    {
        CS_ASSERT_MSG("Contention detected in PoolAllocator delete", mutex.TryLock());

        // NULL pointers are OK - just like delete.
        if (!obj)
        {
            mutex.Unlock();
            return;
        }
#ifdef POOLALLOC_DEBUG_DELETE
        // Throw an assert if this block didn't come from this PoolAllocator
        CS_ASSERT(ObjectCameFromPool(obj));
#endif
        // Clear memory to be on the safe side
        memset(obj, 0xDD, sizeof(TEMPLATE_CLASS));

        // Add this entry to the head of the free list
        ((struct st_freelist_member *)obj)->next=freelist_head;
        freelist_head=((struct st_freelist_member *)obj);
        mutex.Unlock();
    }

    bool PointerCameFromPool(void *ptr)
    {
        return (ObjectCameFromPool((TEMPLATE_CLASS *)ptr)!=NULL);
    }


private:
    /// Return the pool that a given object was allocated from, or NULL if it came from no pool.
    struct st_pool_entry *ObjectCameFromPool(TEMPLATE_CLASS *obj)
    {
        int entrysize;
        struct st_pool_entry *testentry=poollist_head;

        // Determine the size of blocks
        entrysize=sizeof(TEMPLATE_CLASS);
        if (entrysize<sizeof(struct st_freelist_member))
            entrysize=sizeof(struct st_freelist_member);

        // Walk through each pool
        while (testentry)
        {
            /* Each pool consists of:
             *  1 struct st_pool_entry (4 bytes)
             *  (allocation_step_itemcount) blocks of (entrysize) bytes
             *
             * We know that if an object came from the pool it must satisfy the following tests:
             * 1) Its pointer falls after the pool pointer and prior to the pool pointer + the pool size
             * 2) Its offset from the pool pointer + header must be divisible by the block size
             */
            if ((uint8 *)obj>(uint8 *)testentry &&
                (uint8 *)obj<(uint8 *)testentry + sizeof(st_pool_entry)+entrysize*allocation_step_itemcount)
            {
                if ((unsigned long)((uint8 *)obj-((uint8 *)testentry + sizeof(st_pool_entry))) % (unsigned long)entrysize)
                {
                    // This pointer is within a pool, but does not fall on an object boundary!
                    return NULL;
                }
                return testentry;
            }
            testentry=testentry->next;
        }
        return NULL;
    }


    /// Allocate a new pool from the heap and thread its blocks onto the free list.
    void AllocNewPool()
    {
        // Different allocation methods depending on the size of the class
        if (sizeof(TEMPLATE_CLASS)<sizeof(struct st_freelist_member))
        {
            // Using the sizeof struct st_freelist_member as the size of blocks
            uint8 *buffer;
            struct st_freelist_member *workmember;
            struct st_pool_entry *pool;
            unsigned int i;

            // Allocate an appropriately sized block from the heap. Include space for the pool header.
            buffer=(uint8 *)calloc(1,sizeof(st_pool_entry)+sizeof(st_freelist_member)*allocation_step_itemcount);

            if (!buffer)
                return;

            // Set the pointer to the pool header
            pool=(st_pool_entry *)buffer;

            // Set the pointer to the first entry
            workmember=(st_freelist_member *)(buffer+sizeof(st_pool_entry));
            for (i=0;i<allocation_step_itemcount-1;i++)
            {
                // Set each entry (except the last) to point to the next entry
                workmember->next=workmember+1;
                workmember++;
            }

            // Last entry should point to the head of the freelist
            workmember->next=freelist_head;
            // New freelist head should point to the first member
            freelist_head=(st_freelist_member *)(buffer+sizeof(st_pool_entry));

            // Add this pool to the list of pools
            pool->next=poollist_head;
            poollist_head=pool;
        }
        else
        {
            uint8 *buffer;
            TEMPLATE_CLASS *workclass;
            struct st_pool_entry *pool;
            unsigned int i;

            buffer=(uint8 *)malloc(sizeof(st_pool_entry)+sizeof(TEMPLATE_CLASS)*allocation_step_itemcount);

            if (!buffer)
                return;

            // Junk memory to be on the safe side
            memset(buffer, 0, sizeof(st_pool_entry));
            memset(buffer+sizeof(st_pool_entry), 0xDD, sizeof(TEMPLATE_CLASS)*allocation_step_itemcount);

            // Set the pointer to the pool header
            pool=(st_pool_entry *)buffer;

            // Set the pointer to the first entry
            workclass=(TEMPLATE_CLASS *)(buffer+sizeof(st_pool_entry));
            for (i=0;i<allocation_step_itemcount-1;i++)
            {
                // Set each entry (except the last) to point to the next entry
                ((st_freelist_member *)workclass)->next=(st_freelist_member *)(workclass+1);
                workclass++;
            }

            // Last entry should point to the head of the freelist
            ((st_freelist_member *)workclass)->next=freelist_head;

            // New freelist head should point to the first member
            freelist_head=(st_freelist_member *)(buffer+sizeof(st_pool_entry));

            // Add this pool to the list of pools
            pool->next=poollist_head;
            poollist_head=pool;
        }

    }

};

#endif
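/* Usage sketch (illustrative, not part of the original header): once a class has been wired to
 * a PoolAllocator as described in the design notes, client code simply uses new and delete.
 * "MyClass" below is the hypothetical class from the earlier sketch.
 *
 *   MyClass *obj = new MyClass;   // serviced by MyClass::mypool.CallFromNew()
 *   // ... use obj as normal ...
 *   delete obj;                   // block is returned to the free list, not to the OS
 *
 * From inside MyClass (where mypool is visible), mypool.PointerCameFromPool(ptr) can be used
 * to check whether a pointer was handed out by the pool.
 */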