TrinityCore
arena.h
1 /******************************************************************************/
2 #ifdef JEMALLOC_H_TYPES
3 
4 /*
5  * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
6  * as small as possible such that this setting is still honored, without
7  * violating other constraints. The goal is to make runs as small as possible
8  * without exceeding a per run external fragmentation threshold.
9  *
10  * We use binary fixed point math for overhead computations, where the binary
11  * point is implicitly RUN_BFP bits to the left.
12  *
13  * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
14  * honored for some/all object sizes, since when heap profiling is enabled
15  * there is one pointer of header overhead per object (plus a constant). This
16  * constraint is relaxed (ignored) for runs that are so small that the
17  * per-region overhead is greater than:
18  *
19  * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
20  */
21 #define RUN_BFP 12
22 /* \/ Implicit binary fixed point. */
23 #define RUN_MAX_OVRHD 0x0000003dU
24 #define RUN_MAX_OVRHD_RELAX 0x00001800U
25 
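/*
 * Editor's illustrative sketch (not part of the original header): one way the
 * fixed-point threshold above is typically applied when sizing a run. The
 * function name and parameters are hypothetical, and the test is only modeled
 * on jemalloc's run-size calculation: per-run header overhead (the bytes
 * before region 0) is compared against RUN_MAX_OVRHD / 2^RUN_BFP, and the
 * constraint is waived for very small regions per RUN_MAX_OVRHD_RELAX.
 */
static bool
example_run_overhead_ok(size_t try_run_size, size_t try_reg0_offset,
    size_t reg_interval)
{
    /* Assumed relaxation clause: tiny regions cannot meet the threshold. */
    if (RUN_MAX_OVRHD * (reg_interval << 3) <= RUN_MAX_OVRHD_RELAX)
        return (true);
    /*
     * overhead/run_size <= RUN_MAX_OVRHD/2^RUN_BFP, rewritten without
     * division as (overhead << RUN_BFP) <= RUN_MAX_OVRHD * run_size.
     */
    return ((try_reg0_offset << RUN_BFP) <= RUN_MAX_OVRHD * try_run_size);
}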
26 /* Maximum number of regions in one run. */
27 #define LG_RUN_MAXREGS 11
28 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
29 
30 /*
31  * Minimum redzone size. Redzones may be larger than this if necessary to
32  * preserve region alignment.
33  */
34 #define REDZONE_MINSIZE 16
35 
36 /*
37  * The minimum ratio of active:dirty pages per arena is computed as:
38  *
39  * (nactive >> opt_lg_dirty_mult) >= ndirty
40  *
41  * So, supposing that opt_lg_dirty_mult is 3, there must be at least 8 times
42  * as many active pages as dirty pages.
43  */
44 #define LG_DIRTY_MULT_DEFAULT 3
45 
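/*
 * Editor's illustrative sketch (not part of the original header): how the
 * active:dirty ratio above typically gates purging. The helper name is
 * hypothetical; the check mirrors the inequality in the comment, with
 * lg_dirty_mult passed in rather than read from opt_lg_dirty_mult.
 */
static bool
example_should_purge(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{
    if (lg_dirty_mult < 0)
        return (false); /* Purging disabled. */
    /* Purge once dirty pages exceed their allowed share of active pages. */
    return (ndirty > (nactive >> lg_dirty_mult));
}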
46 typedef struct arena_chunk_map_s arena_chunk_map_t;
47 typedef struct arena_chunk_s arena_chunk_t;
48 typedef struct arena_run_s arena_run_t;
49 typedef struct arena_bin_info_s arena_bin_info_t;
50 typedef struct arena_bin_s arena_bin_t;
51 typedef struct arena_s arena_t;
52 
53 #endif /* JEMALLOC_H_TYPES */
54 /******************************************************************************/
55 #ifdef JEMALLOC_H_STRUCTS
56 
57 /* Each element of the chunk map corresponds to one page within the chunk. */
58 struct arena_chunk_map_s {
59 #ifndef JEMALLOC_PROF
60  /*
61  * Overlay prof_ctx in order to allow it to be referenced by dead code.
62  * Such antics aren't warranted for per arena data structures, but
63  * chunk map overhead accounts for a percentage of memory, rather than
64  * being just a fixed cost.
65  */
66  union {
67 #endif
68  union {
69  /*
70  * Linkage for run trees. There are two disjoint uses:
71  *
72  * 1) arena_t's runs_avail tree.
73  * 2) arena_run_t conceptually uses this linkage for in-use
74  * non-full runs, rather than directly embedding linkage.
75  */
76  rb_node(arena_chunk_map_t) rb_link;
77  /*
78  * List of runs currently in purgatory. arena_chunk_purge()
79  * temporarily allocates runs that contain dirty pages while
80  * purging, so that other threads cannot use the runs while the
81  * purging thread is operating without the arena lock held.
82  */
83  ql_elm(arena_chunk_map_t) ql_link;
84  } u;
85 
86  /* Profile counters, used for large object runs. */
87  prof_ctx_t *prof_ctx;
88 #ifndef JEMALLOC_PROF
89  }; /* union { ... }; */
90 #endif
91 
92  /*
93  * Run address (or size) and various flags are stored together. The bit
94  * layout looks like (assuming 32-bit system):
95  *
96  * ???????? ???????? ????nnnn nnnndula
97  *
98  * ? : Unallocated: Run address for first/last pages, unset for internal
99  * pages.
100  * Small: Run page offset.
101  * Large: Run size for first page, unset for trailing pages.
102  * n : binind for small size class, BININD_INVALID for large size class.
103  * d : dirty?
104  * u : unzeroed?
105  * l : large?
106  * a : allocated?
107  *
108  * Following are example bit patterns for the three types of runs.
109  *
110  * p : run page offset
111  * s : run size
112  * n : binind for size class; large objects set these to BININD_INVALID
113  * except for promoted allocations (see prof_promote)
114  * x : don't care
115  * - : 0
116  * + : 1
117  * [DULA] : bit set
118  * [dula] : bit unset
119  *
120  * Unallocated (clean):
121  * ssssssss ssssssss ssss++++ ++++du-a
122  * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
123  * ssssssss ssssssss ssss++++ ++++dU-a
124  *
125  * Unallocated (dirty):
126  * ssssssss ssssssss ssss++++ ++++D--a
127  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
128  * ssssssss ssssssss ssss++++ ++++D--a
129  *
130  * Small:
131  * pppppppp pppppppp ppppnnnn nnnnd--A
132  * pppppppp pppppppp ppppnnnn nnnn---A
133  * pppppppp pppppppp ppppnnnn nnnnd--A
134  *
135  * Large:
136  * ssssssss ssssssss ssss++++ ++++D-LA
137  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
138  * -------- -------- ----++++ ++++D-LA
139  *
140  * Large (sampled, size <= PAGE):
141  * ssssssss ssssssss ssssnnnn nnnnD-LA
142  *
143  * Large (not sampled, size == PAGE):
144  * ssssssss ssssssss ssss++++ ++++D-LA
145  */
146  size_t bits;
147 #define CHUNK_MAP_BININD_SHIFT 4
148 #define BININD_INVALID ((size_t)0xffU)
149 /* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
150 #define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
151 #define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
152 #define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
153 #define CHUNK_MAP_DIRTY ((size_t)0x8U)
154 #define CHUNK_MAP_UNZEROED ((size_t)0x4U)
155 #define CHUNK_MAP_LARGE ((size_t)0x2U)
156 #define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
157 #define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
158 };
159 typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
160 typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
161 typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
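/*
 * Editor's illustrative sketch (not part of the original header): decoding a
 * raw map `bits` word with the masks above. The function name is
 * hypothetical; the real accessors are the arena_mapbits_*_get() inlines
 * later in this file.
 */
static void
example_mapbits_decode(size_t mapbits, size_t *size_or_runind, size_t *binind)
{
    *binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    if ((mapbits & CHUNK_MAP_ALLOCATED) == 0 ||
        (mapbits & CHUNK_MAP_LARGE) != 0) {
        /* Unallocated or large: the upper bits hold the run size. */
        *size_or_runind = mapbits & ~PAGE_MASK;
    } else {
        /* Small: the upper bits hold the run page offset; binind is valid. */
        *size_or_runind = mapbits >> LG_PAGE;
    }
    /* The d/u flags are read as mapbits & CHUNK_MAP_DIRTY/CHUNK_MAP_UNZEROED. */
}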
162 
163 /* Arena chunk header. */
164 struct arena_chunk_s {
165  /* Arena that owns the chunk. */
166  arena_t *arena;
167 
168  /* Linkage for tree of arena chunks that contain dirty runs. */
169  rb_node(arena_chunk_t) dirty_link;
170 
171  /* Number of dirty pages. */
172  size_t ndirty;
173 
174  /* Number of available runs. */
175  size_t nruns_avail;
176 
177  /*
178  * Number of available run adjacencies that purging could coalesce.
179  * Clean and dirty available runs are not coalesced, which causes
180  * virtual memory fragmentation. The ratio of
181  * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
182  * fragmentation.
183  */
184  size_t nruns_adjac;
185 
186  /*
187  * Map of pages within chunk that keeps track of free/large/small. The
188  * first map_bias entries are omitted, since the chunk header does not
189  * need to be tracked in the map. This omission saves a header page
190  * for common chunk sizes (e.g. 4 MiB).
191  */
192  arena_chunk_map_t map[1]; /* Dynamically sized. */
193 };
194 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
195 
196 struct arena_run_s {
197  /* Bin this run is associated with. */
198  arena_bin_t *bin;
199 
200  /* Index of next region that has never been allocated, or nregs. */
201  uint32_t nextind;
202 
203  /* Number of free regions in run. */
204  unsigned nfree;
205 };
206 
207 /*
208  * Read-only information associated with each element of arena_t's bins array
209  * is stored separately, partly to reduce memory usage (only one copy, rather
210  * than one per arena), but mainly to avoid false cacheline sharing.
211  *
212  * Each run has the following layout:
213  *
214  * /--------------------\
215  * | arena_run_t header |
216  * | ... |
217  * bitmap_offset | bitmap |
218  * | ... |
219  * ctx0_offset | ctx map |
220  * | ... |
221  * |--------------------|
222  * | redzone |
223  * reg0_offset | region 0 |
224  * | redzone |
225  * |--------------------| \
226  * | redzone | |
227  * | region 1 | > reg_interval
228  * | redzone | /
229  * |--------------------|
230  * | ... |
231  * | ... |
232  * | ... |
233  * |--------------------|
234  * | redzone |
235  * | region nregs-1 |
236  * | redzone |
237  * |--------------------|
238  * | alignment pad? |
239  * \--------------------/
240  *
241  * reg_interval has at least the same minimum alignment as reg_size; this
242  * preserves the alignment constraint that sa2u() depends on. Alignment pad is
243  * either 0 or redzone_size; it is present only if needed to align reg0_offset.
244  */
245 struct arena_bin_info_s {
246  /* Size of regions in a run for this bin's size class. */
247  size_t reg_size;
248 
249  /* Redzone size. */
250  size_t redzone_size;
251 
252  /* Interval between regions (reg_size + (redzone_size << 1)). */
253  size_t reg_interval;
254 
255  /* Total size of a run for this bin's size class. */
256  size_t run_size;
257 
258  /* Total number of regions in a run for this bin's size class. */
259  uint32_t nregs;
260 
261  /*
262  * Offset of first bitmap_t element in a run header for this bin's size
263  * class.
264  */
265  uint32_t bitmap_offset;
266 
267  /*
268  * Metadata used to manipulate bitmaps for runs associated with this
269  * bin.
270  */
271  bitmap_info_t bitmap_info;
272 
273  /*
274  * Offset of first (prof_ctx_t *) in a run header for this bin's size
275  * class, or 0 if (config_prof == false || opt_prof == false).
276  */
277  uint32_t ctx0_offset;
278 
279  /* Offset of first region in a run for this bin's size class. */
280  uint32_t reg0_offset;
281 };
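/*
 * Editor's illustrative sketch (not part of the original header): computing
 * the address of region `regind` within a run from the layout fields above
 * (the inverse of arena_run_regind() below). The helper name is hypothetical.
 */
static void *
example_run_reg_addr(arena_run_t *run, arena_bin_info_t *bin_info,
    unsigned regind)
{
    assert(regind < bin_info->nregs);
    /* Region i starts reg0_offset + i * reg_interval bytes into the run. */
    return ((void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind)));
}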
282 
283 struct arena_bin_s {
284  /*
285  * All operations on runcur, runs, and stats require that lock be
286  * locked. Run allocation/deallocation are protected by the arena lock,
287  * which may be acquired while holding one or more bin locks, but not
288  * vice versa.
289  */
290  malloc_mutex_t lock;
291 
292  /*
293  * Current run being used to service allocations of this bin's size
294  * class.
295  */
296  arena_run_t *runcur;
297 
298  /*
299  * Tree of non-full runs. This tree is used when looking for an
300  * existing run when runcur is no longer usable. We choose the
301  * non-full run that is lowest in memory; this policy tends to keep
302  * objects packed well, and it can also help reduce the number of
303  * almost-empty chunks.
304  */
305  arena_run_tree_t runs;
306 
307  /* Bin statistics. */
308  malloc_bin_stats_t stats;
309 };
310 
311 struct arena_s {
312  /* This arena's index within the arenas array. */
313  unsigned ind;
314 
315  /*
316  * Number of threads currently assigned to this arena. This field is
317  * protected by arenas_lock.
318  */
319  unsigned nthreads;
320 
321  /*
322  * There are three classes of arena operations from a locking
323  * perspective:
324  * 1) Thread assignment (modifies nthreads) is protected by
325  * arenas_lock.
326  * 2) Bin-related operations are protected by bin locks.
327  * 3) Chunk- and run-related operations are protected by this mutex.
328  */
329  malloc_mutex_t lock;
330 
331  arena_stats_t stats;
332  /*
333  * List of tcaches for extant threads associated with this arena.
334  * Stats from these are merged incrementally, and at exit.
335  */
336  ql_head(tcache_t) tcache_ql;
337 
338  uint64_t prof_accumbytes;
339 
340  dss_prec_t dss_prec;
341 
342  /* Tree of dirty-page-containing chunks this arena manages. */
343  arena_chunk_tree_t chunks_dirty;
344 
345  /*
346  * In order to avoid rapid chunk allocation/deallocation when an arena
347  * oscillates right on the cusp of needing a new chunk, cache the most
348  * recently freed chunk. The spare is left in the arena's chunk trees
349  * until it is deleted.
350  *
351  * There is one spare chunk per arena, rather than one spare total, in
352  * order to avoid interactions between multiple threads that could make
353  * a single spare inadequate.
354  */
355  arena_chunk_t *spare;
356 
357  /* Number of pages in active runs. */
358  size_t nactive;
359 
360  /*
361  * Current count of pages within unused runs that are potentially
362  * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
363  * By tracking this, we can institute a limit on how much dirty unused
364  * memory is mapped for each arena.
365  */
366  size_t ndirty;
367 
368  /*
369  * Approximate number of pages being purged. It is possible for
370  * multiple threads to purge dirty pages concurrently, and they use
371  * npurgatory to indicate the total number of pages all threads are
372  * attempting to purge.
373  */
374  size_t npurgatory;
375 
376  /*
377  * Size/address-ordered tree of this arena's available runs. The tree
378  * is used for first-best-fit run allocation.
379  */
380  arena_avail_tree_t runs_avail;
381 
382  /* bins is used to store trees of free regions. */
383  arena_bin_t bins[NBINS];
384 };
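/*
 * Editor's illustrative sketch (not part of the original header): the lock
 * ordering permitted by the comments above -- the arena lock may be acquired
 * while a bin lock is held, never the reverse, and thread assignment is
 * guarded separately by arenas_lock. Schematic only; the function name is
 * hypothetical and this is not a transcription of arena.c.
 */
static void
example_lock_order(arena_t *arena, arena_bin_t *bin)
{
    malloc_mutex_lock(&bin->lock);      /* Bin-level operation. */
    malloc_mutex_lock(&arena->lock);    /* Chunk/run-level operation. */
    /* ... e.g. obtain a fresh run from the arena for this bin ... */
    malloc_mutex_unlock(&arena->lock);
    malloc_mutex_unlock(&bin->lock);
}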
385 
386 #endif /* JEMALLOC_H_STRUCTS */
387 /******************************************************************************/
388 #ifdef JEMALLOC_H_EXTERNS
389 
390 extern ssize_t opt_lg_dirty_mult;
391 /*
392  * small_size2bin is a compact lookup table that rounds request sizes up to
393  * size classes. In order to reduce cache footprint, the table is compressed,
394  * and all accesses are via the SMALL_SIZE2BIN macro.
395  */
396 extern uint8_t const small_size2bin[];
397 #define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
398 
399 extern arena_bin_info_t arena_bin_info[NBINS];
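/*
 * Editor's illustrative sketch (not part of the original header): mapping a
 * small request size to its bin and rounded-up size class via the compressed
 * table. The helper name is hypothetical.
 */
static size_t
example_small_size_class(size_t size)
{
    size_t binind;

    assert(size > 0 && size <= SMALL_MAXCLASS);
    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    /* The size class actually backing this request. */
    return (arena_bin_info[binind].reg_size);
}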
400 
401 /* Number of large size classes. */
402 #define nlclasses (chunk_npages - map_bias)
403 
404 void arena_purge_all(arena_t *arena);
405 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
406  size_t binind, uint64_t prof_accumbytes);
407 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
408  bool zero);
409 #ifdef JEMALLOC_JET
410 typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
411  uint8_t);
412 extern arena_redzone_corruption_t *arena_redzone_corruption;
413 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
414 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
415 #else
416 void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
417 #endif
418 void arena_quarantine_junk_small(void *ptr, size_t usize);
419 void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
420 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
421 void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
422 void arena_prof_promoted(const void *ptr, size_t size);
423 void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
424  arena_chunk_map_t *mapelm);
425 void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
426  size_t pageind, arena_chunk_map_t *mapelm);
427 void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
428  size_t pageind);
429 #ifdef JEMALLOC_JET
430 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
431 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
432 #endif
433 void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
434  void *ptr);
435 void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
436 #ifdef JEMALLOC_JET
437 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
438 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
439 #endif
440 bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
441  size_t extra, bool zero);
442 void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
443  size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
444  bool try_tcache_dalloc);
445 dss_prec_t arena_dss_prec_get(arena_t *arena);
446 void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
447 void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
448  size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
449  malloc_large_stats_t *lstats);
450 bool arena_new(arena_t *arena, unsigned ind);
451 void arena_boot(void);
452 void arena_prefork(arena_t *arena);
453 void arena_postfork_parent(arena_t *arena);
454 void arena_postfork_child(arena_t *arena);
455 
456 #endif /* JEMALLOC_H_EXTERNS */
457 /******************************************************************************/
458 #ifdef JEMALLOC_H_INLINES
459 
460 #ifndef JEMALLOC_ENABLE_INLINE
461 arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
462 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
463 size_t arena_mapbitsp_read(size_t *mapbitsp);
464 size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
465 size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
466  size_t pageind);
467 size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
468 size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
469 size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
470 size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
471 size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
472 size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
473 size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
474 void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
475 void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
476  size_t size, size_t flags);
477 void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
478  size_t size);
479 void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
480  size_t size, size_t flags);
481 void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
482  size_t binind);
483 void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
484  size_t runind, size_t binind, size_t flags);
485 void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
486  size_t unzeroed);
487 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
488 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
489 bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
490 size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
491 size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
492 unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
493  const void *ptr);
494 prof_ctx_t *arena_prof_ctx_get(const void *ptr);
495 void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
496 void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
497 size_t arena_salloc(const void *ptr, bool demote);
498 void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
499  bool try_tcache);
500 #endif
501 
502 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
503 # ifdef JEMALLOC_ARENA_INLINE_A
504 JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
505 arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
506 {
507 
508  assert(pageind >= map_bias);
509  assert(pageind < chunk_npages);
510 
511  return (&chunk->map[pageind-map_bias]);
512 }
513 
514 JEMALLOC_ALWAYS_INLINE size_t *
515 arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
516 {
517 
518  return (&arena_mapp_get(chunk, pageind)->bits);
519 }
520 
521 JEMALLOC_ALWAYS_INLINE size_t
522 arena_mapbitsp_read(size_t *mapbitsp)
523 {
524 
525  return (*mapbitsp);
526 }
527 
528 JEMALLOC_ALWAYS_INLINE size_t
529 arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
530 {
531 
532  return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
533 }
534 
535 JEMALLOC_ALWAYS_INLINE size_t
536 arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
537 {
538  size_t mapbits;
539 
540  mapbits = arena_mapbits_get(chunk, pageind);
541  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
542  return (mapbits & ~PAGE_MASK);
543 }
544 
545 JEMALLOC_ALWAYS_INLINE size_t
546 arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
547 {
548  size_t mapbits;
549 
550  mapbits = arena_mapbits_get(chunk, pageind);
551  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
552  (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
553  return (mapbits & ~PAGE_MASK);
554 }
555 
556 JEMALLOC_ALWAYS_INLINE size_t
557 arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
558 {
559  size_t mapbits;
560 
561  mapbits = arena_mapbits_get(chunk, pageind);
562  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
563  CHUNK_MAP_ALLOCATED);
564  return (mapbits >> LG_PAGE);
565 }
566 
567 JEMALLOC_ALWAYS_INLINE size_t
568 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
569 {
570  size_t mapbits;
571  size_t binind;
572 
573  mapbits = arena_mapbits_get(chunk, pageind);
574  binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
575  assert(binind < NBINS || binind == BININD_INVALID);
576  return (binind);
577 }
578 
579 JEMALLOC_ALWAYS_INLINE size_t
580 arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
581 {
582  size_t mapbits;
583 
584  mapbits = arena_mapbits_get(chunk, pageind);
585  return (mapbits & CHUNK_MAP_DIRTY);
586 }
587 
588 JEMALLOC_ALWAYS_INLINE size_t
589 arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
590 {
591  size_t mapbits;
592 
593  mapbits = arena_mapbits_get(chunk, pageind);
594  return (mapbits & CHUNK_MAP_UNZEROED);
595 }
596 
597 JEMALLOC_ALWAYS_INLINE size_t
598 arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
599 {
600  size_t mapbits;
601 
602  mapbits = arena_mapbits_get(chunk, pageind);
603  return (mapbits & CHUNK_MAP_LARGE);
604 }
605 
606 JEMALLOC_ALWAYS_INLINE size_t
607 arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
608 {
609  size_t mapbits;
610 
611  mapbits = arena_mapbits_get(chunk, pageind);
612  return (mapbits & CHUNK_MAP_ALLOCATED);
613 }
614 
615 JEMALLOC_ALWAYS_INLINE void
616 arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
617 {
618 
619  *mapbitsp = mapbits;
620 }
621 
622 JEMALLOC_ALWAYS_INLINE void
623 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
624  size_t flags)
625 {
626  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
627 
628  assert((size & PAGE_MASK) == 0);
629  assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
630  assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
631  arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
632 }
633 
634 JEMALLOC_ALWAYS_INLINE void
635 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
636  size_t size)
637 {
638  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
639  size_t mapbits = arena_mapbitsp_read(mapbitsp);
640 
641  assert((size & PAGE_MASK) == 0);
642  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
643  arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
644 }
645 
646 JEMALLOC_ALWAYS_INLINE void
647 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
648  size_t flags)
649 {
650  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
651  size_t mapbits = arena_mapbitsp_read(mapbitsp);
652  size_t unzeroed;
653 
654  assert((size & PAGE_MASK) == 0);
655  assert((flags & CHUNK_MAP_DIRTY) == flags);
656  unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
657  arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
658  | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
659 }
660 
661 JEMALLOC_ALWAYS_INLINE void
662 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
663  size_t binind)
664 {
665  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
666  size_t mapbits = arena_mapbitsp_read(mapbitsp);
667 
668  assert(binind <= BININD_INVALID);
669  assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
670  arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
671  (binind << CHUNK_MAP_BININD_SHIFT));
672 }
673 
674 JEMALLOC_ALWAYS_INLINE void
675 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
676  size_t binind, size_t flags)
677 {
678  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
679  size_t mapbits = arena_mapbitsp_read(mapbitsp);
680  size_t unzeroed;
681 
682  assert(binind < BININD_INVALID);
683  assert(pageind - runind >= map_bias);
684  assert((flags & CHUNK_MAP_DIRTY) == flags);
685  unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
686  arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
687  CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
688 }
689 
690 JEMALLOC_ALWAYS_INLINE void
691 arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
692  size_t unzeroed)
693 {
694  size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
695  size_t mapbits = arena_mapbitsp_read(mapbitsp);
696 
697  arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
698  unzeroed);
699 }
700 
701 JEMALLOC_INLINE bool
702 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
703 {
704 
705  cassert(config_prof);
706  assert(prof_interval != 0);
707 
708  arena->prof_accumbytes += accumbytes;
709  if (arena->prof_accumbytes >= prof_interval) {
710  arena->prof_accumbytes -= prof_interval;
711  return (true);
712  }
713  return (false);
714 }
715 
716 JEMALLOC_INLINE bool
717 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
718 {
719 
720  cassert(config_prof);
721 
722  if (prof_interval == 0)
723  return (false);
724  return (arena_prof_accum_impl(arena, accumbytes));
725 }
726 
727 JEMALLOC_INLINE bool
728 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
729 {
730 
731  cassert(config_prof);
732 
733  if (prof_interval == 0)
734  return (false);
735 
736  {
737  bool ret;
738 
739  malloc_mutex_lock(&arena->lock);
740  ret = arena_prof_accum_impl(arena, accumbytes);
741  malloc_mutex_unlock(&arena->lock);
742  return (ret);
743  }
744 }
745 
746 JEMALLOC_ALWAYS_INLINE size_t
747 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
748 {
749  size_t binind;
750 
751  binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
752 
753  if (config_debug) {
754  arena_chunk_t *chunk;
755  arena_t *arena;
756  size_t pageind;
757  size_t actual_mapbits;
758  arena_run_t *run;
759  arena_bin_t *bin;
760  size_t actual_binind;
761  arena_bin_info_t *bin_info;
762 
763  assert(binind != BININD_INVALID);
764  assert(binind < NBINS);
765  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
766  arena = chunk->arena;
767  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
768  actual_mapbits = arena_mapbits_get(chunk, pageind);
769  assert(mapbits == actual_mapbits);
770  assert(arena_mapbits_large_get(chunk, pageind) == 0);
771  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
772  run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
773  (actual_mapbits >> LG_PAGE)) << LG_PAGE));
774  bin = run->bin;
775  actual_binind = bin - arena->bins;
776  assert(binind == actual_binind);
777  bin_info = &arena_bin_info[actual_binind];
778  assert(((uintptr_t)ptr - ((uintptr_t)run +
779  (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
780  == 0);
781  }
782 
783  return (binind);
784 }
785 # endif /* JEMALLOC_ARENA_INLINE_A */
786 
787 # ifdef JEMALLOC_ARENA_INLINE_B
788 JEMALLOC_INLINE size_t
789 arena_bin_index(arena_t *arena, arena_bin_t *bin)
790 {
791  size_t binind = bin - arena->bins;
792  assert(binind < NBINS);
793  return (binind);
794 }
795 
796 JEMALLOC_INLINE unsigned
797 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
798 {
799  unsigned shift, diff, regind;
800  size_t interval;
801 
802  /*
803  * Freeing a pointer lower than region zero can cause assertion
804  * failure.
805  */
806  assert((uintptr_t)ptr >= (uintptr_t)run +
807  (uintptr_t)bin_info->reg0_offset);
808 
809  /*
810  * Avoid doing division with a variable divisor if possible. Using
811  * actual division here can reduce allocator throughput by over 20%!
812  */
813  diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
814  bin_info->reg0_offset);
815 
816  /* Rescale (factor powers of 2 out of the numerator and denominator). */
817  interval = bin_info->reg_interval;
818  shift = ffs(interval) - 1;
819  diff >>= shift;
820  interval >>= shift;
821 
822  if (interval == 1) {
823  /* The divisor was a power of 2. */
824  regind = diff;
825  } else {
826  /*
827  * To divide by a number D that is not a power of two we
828  * multiply by (2^21 / D) and then right shift by 21 positions.
829  *
830  * X / D
831  *
832  * becomes
833  *
834  * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
835  *
836  * We can omit the first three elements, because we never
837  * divide by 0, and 1 and 2 are both powers of two, which are
838  * handled above.
839  */
840 #define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
841 #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
842  static const unsigned interval_invs[] = {
843  SIZE_INV(3),
844  SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
845  SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
846  SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
847  SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
848  SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
849  SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
850  SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
851  };
852 
853  if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
854  2)) {
855  regind = (diff * interval_invs[interval - 3]) >>
856  SIZE_INV_SHIFT;
857  } else
858  regind = diff / interval;
859 #undef SIZE_INV
860 #undef SIZE_INV_SHIFT
861  }
862  assert(diff == regind * interval);
863  assert(regind < bin_info->nregs);
864 
865  return (regind);
866 }
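/*
 * Editor's note (not part of the original header), a worked instance of the
 * reciprocal trick above, assuming 32-bit unsigned so SIZE_INV_SHIFT == 21:
 * SIZE_INV(3) == (1U << 21)/3 + 1 == 699051. With reg_interval == 48 and a
 * pointer 144 bytes past region 0, the common factor 16 is shifted out,
 * leaving diff == 9 and interval == 3; then (9 * 699051) >> 21 == 3, which
 * matches 9 / 3 and identifies region 3.
 */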
867 
868 JEMALLOC_INLINE prof_ctx_t *
869 arena_prof_ctx_get(const void *ptr)
870 {
871  prof_ctx_t *ret;
872  arena_chunk_t *chunk;
873  size_t pageind, mapbits;
874 
875  cassert(config_prof);
876  assert(ptr != NULL);
877  assert(CHUNK_ADDR2BASE(ptr) != ptr);
878 
879  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
880  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
881  mapbits = arena_mapbits_get(chunk, pageind);
882  assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
883  if ((mapbits & CHUNK_MAP_LARGE) == 0) {
884  if (prof_promote)
885  ret = (prof_ctx_t *)(uintptr_t)1U;
886  else {
887  arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
888  (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
889  LG_PAGE));
890  size_t binind = arena_ptr_small_binind_get(ptr,
891  mapbits);
892  arena_bin_info_t *bin_info = &arena_bin_info[binind];
893  unsigned regind;
894 
895  regind = arena_run_regind(run, bin_info, ptr);
896  ret = *(prof_ctx_t **)((uintptr_t)run +
897  bin_info->ctx0_offset + (regind *
898  sizeof(prof_ctx_t *)));
899  }
900  } else
901  ret = arena_mapp_get(chunk, pageind)->prof_ctx;
902 
903  return (ret);
904 }
905 
906 JEMALLOC_INLINE void
907 arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
908 {
909  arena_chunk_t *chunk;
910  size_t pageind;
911 
912  cassert(config_prof);
913  assert(ptr != NULL);
914  assert(CHUNK_ADDR2BASE(ptr) != ptr);
915 
916  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
917  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
918  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
919 
920  if (usize > SMALL_MAXCLASS || (prof_promote &&
921  ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
922  pageind) != 0))) {
923  assert(arena_mapbits_large_get(chunk, pageind) != 0);
924  arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
925  } else {
926  assert(arena_mapbits_large_get(chunk, pageind) == 0);
927  if (prof_promote == false) {
928  size_t mapbits = arena_mapbits_get(chunk, pageind);
929  arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
930  (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
931  LG_PAGE));
932  size_t binind;
933  arena_bin_info_t *bin_info;
934  unsigned regind;
935 
936  binind = arena_ptr_small_binind_get(ptr, mapbits);
937  bin_info = &arena_bin_info[binind];
938  regind = arena_run_regind(run, bin_info, ptr);
939 
940  *((prof_ctx_t **)((uintptr_t)run +
941  bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
942  *)))) = ctx;
943  }
944  }
945 }
946 
947 JEMALLOC_ALWAYS_INLINE void *
948 arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
949 {
950  tcache_t *tcache;
951 
952  assert(size != 0);
953  assert(size <= arena_maxclass);
954 
955  if (size <= SMALL_MAXCLASS) {
956  if (try_tcache && (tcache = tcache_get(true)) != NULL)
957  return (tcache_alloc_small(tcache, size, zero));
958  else {
959  return (arena_malloc_small(choose_arena(arena), size,
960  zero));
961  }
962  } else {
963  /*
964  * Initialize tcache after checking size in order to avoid
965  * infinite recursion during tcache initialization.
966  */
967  if (try_tcache && size <= tcache_maxclass && (tcache =
968  tcache_get(true)) != NULL)
969  return (tcache_alloc_large(tcache, size, zero));
970  else {
971  return (arena_malloc_large(choose_arena(arena), size,
972  zero));
973  }
974  }
975 }
976 
977 /* Return the size of the allocation pointed to by ptr. */
978 JEMALLOC_ALWAYS_INLINE size_t
979 arena_salloc(const void *ptr, bool demote)
980 {
981  size_t ret;
982  arena_chunk_t *chunk;
983  size_t pageind, binind;
984 
985  assert(ptr != NULL);
986  assert(CHUNK_ADDR2BASE(ptr) != ptr);
987 
988  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
989  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
990  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
991  binind = arena_mapbits_binind_get(chunk, pageind);
992  if (binind == BININD_INVALID || (config_prof && demote == false &&
993  prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
994  /*
995  * Large allocation. In the common case (demote == true), and
996  * as this is an inline function, most callers will only end up
997  * looking at binind to determine that ptr is a small
998  * allocation.
999  */
1000  assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1001  ret = arena_mapbits_large_size_get(chunk, pageind);
1002  assert(ret != 0);
1003  assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
1004  assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
1005  pageind+(ret>>LG_PAGE)-1) == 0);
1006  assert(binind == arena_mapbits_binind_get(chunk,
1007  pageind+(ret>>LG_PAGE)-1));
1008  assert(arena_mapbits_dirty_get(chunk, pageind) ==
1009  arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
1010  } else {
1011  /*
1012  * Small allocation (possibly promoted to a large object due to
1013  * prof_promote).
1014  */
1015  assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
1016  arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1017  pageind)) == binind);
1018  ret = arena_bin_info[binind].reg_size;
1019  }
1020 
1021  return (ret);
1022 }
1023 
1024 JEMALLOC_ALWAYS_INLINE void
1025 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
1026 {
1027  size_t pageind, mapbits;
1028  tcache_t *tcache;
1029 
1030  assert(arena != NULL);
1031  assert(chunk->arena == arena);
1032  assert(ptr != NULL);
1033  assert(CHUNK_ADDR2BASE(ptr) != ptr);
1034 
1035  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1036  mapbits = arena_mapbits_get(chunk, pageind);
1037  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1038  if ((mapbits & CHUNK_MAP_LARGE) == 0) {
1039  /* Small allocation. */
1040  if (try_tcache && (tcache = tcache_get(false)) != NULL) {
1041  size_t binind;
1042 
1043  binind = arena_ptr_small_binind_get(ptr, mapbits);
1044  tcache_dalloc_small(tcache, ptr, binind);
1045  } else
1046  arena_dalloc_small(arena, chunk, ptr, pageind);
1047  } else {
1048  size_t size = arena_mapbits_large_size_get(chunk, pageind);
1049 
1050  assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1051 
1052  if (try_tcache && size <= tcache_maxclass && (tcache =
1053  tcache_get(false)) != NULL) {
1054  tcache_dalloc_large(tcache, ptr, size);
1055  } else
1056  arena_dalloc_large(arena, chunk, ptr);
1057  }
1058 }
1059 # endif /* JEMALLOC_ARENA_INLINE_B */
1060 #endif
1061 
1062 #endif /* JEMALLOC_H_INLINES */
1063 /******************************************************************************/