TrinityCore
jemalloc_internal.h File Reference
#include <math.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <pthread.h>
#include <errno.h>
#include <sys/types.h>
#include <limits.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include "jemalloc_defs.h"
#include "../jemalloc.h"
#include "jemalloc/internal/private_namespace.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"


Classes

struct  thread_allocated_t
 

Macros

#define SIZE_T_MAX   SIZE_MAX
 
#define offsetof(type, member)   ((size_t)&(((type *)NULL)->member))
 
#define JEMALLOC_NO_DEMANGLE
 
#define UNUSED
 
#define RB_COMPACT
 
#define JEMALLOC_H_TYPES
 
#define restrict
 
#define MALLOCX_LG_ALIGN_MASK   ((int)0x3f)
 
#define ALLOCM_LG_ALIGN_MASK   ((int)0x3f)
 
#define ZU(z)   ((size_t)z)
 
#define QU(q)   ((uint64_t)q)
 
#define __DECONST(type, var)   ((type)(uintptr_t)(const void *)(var))
 
#define JEMALLOC_ENABLE_INLINE
 
#define JEMALLOC_ALWAYS_INLINE   static inline
 
#define JEMALLOC_ALWAYS_INLINE_C   static inline
 
#define JEMALLOC_INLINE   static inline
 
#define JEMALLOC_INLINE_C   static inline
 
#define LG_TINY_MIN   3
 
#define TINY_MIN   (1U << LG_TINY_MIN)
 
#define QUANTUM   ((size_t)(1U << LG_QUANTUM))
 
#define QUANTUM_MASK   (QUANTUM - 1)
 
#define QUANTUM_CEILING(a)   (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
 
#define LONG   ((size_t)(1U << LG_SIZEOF_LONG))
 
#define LONG_MASK   (LONG - 1)
 
#define LONG_CEILING(a)   (((a) + LONG_MASK) & ~LONG_MASK)
 
#define SIZEOF_PTR   (1U << LG_SIZEOF_PTR)
 
#define PTR_MASK   (SIZEOF_PTR - 1)
 
#define PTR_CEILING(a)   (((a) + PTR_MASK) & ~PTR_MASK)
 
#define LG_CACHELINE   6
 
#define CACHELINE   64
 
#define CACHELINE_MASK   (CACHELINE - 1)
 
#define CACHELINE_CEILING(s)   (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 
#define LG_PAGE   STATIC_PAGE_SHIFT
 
#define PAGE   ((size_t)(1U << STATIC_PAGE_SHIFT))
 
#define PAGE_MASK   ((size_t)(PAGE - 1))
 
#define PAGE_CEILING(s)   (((s) + PAGE_MASK) & ~PAGE_MASK)
 
#define ALIGNMENT_ADDR2BASE(a, alignment)   ((void *)((uintptr_t)(a) & (-(alignment))))
 
#define ALIGNMENT_ADDR2OFFSET(a, alignment)   ((size_t)((uintptr_t)(a) & (alignment - 1)))
 
#define ALIGNMENT_CEILING(s, alignment)   (((s) + (alignment - 1)) & (-(alignment)))
 
#define VARIABLE_ARRAY(type, name, count)   type *name = alloca(sizeof(type) * count)
 
#define RUNNING_ON_VALGRIND   ((unsigned)0)
 
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)   do {} while (0)
 
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)   do {} while (0)
 
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)   do {} while (0)
 
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)   do {} while (0)
 
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)   do {} while (0)
 
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)   do {} while (0)
 
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)   do {} while (0)
 
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, old_rzsize, zero)   do {} while (0)
 
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)   do {} while (0)
 
#define JEMALLOC_H_STRUCTS
 
#define THREAD_ALLOCATED_INITIALIZER   JEMALLOC_ARG_CONCAT({0, 0})
 
#define JEMALLOC_H_EXTERNS
 
#define JEMALLOC_H_INLINES
 
#define JEMALLOC_ARENA_INLINE_A
 
#define JEMALLOC_ARENA_INLINE_B
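 
The *_CEILING and *_MASK macros above all rely on the same power-of-two arithmetic: adding (alignment - 1) and masking with ~(alignment - 1) rounds a size up, while masking an address with -(alignment) rounds it down. A standalone sketch, assuming 4 KiB pages and 64-byte cache lines (both are build-dependent in jemalloc):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define SKETCH_PAGE      ((size_t)4096)                 /* assumed page size */
#define SKETCH_PAGE_MASK (SKETCH_PAGE - 1)
#define SKETCH_PAGE_CEILING(s) (((s) + SKETCH_PAGE_MASK) & ~SKETCH_PAGE_MASK)
#define SKETCH_ADDR2BASE(a, align) \
    ((void *)((uintptr_t)(a) & (-(uintptr_t)(align))))  /* round address down */

int main(void) {
    assert(SKETCH_PAGE_CEILING(1) == 4096);     /* rounds up to one page */
    assert(SKETCH_PAGE_CEILING(4096) == 4096);  /* already aligned: unchanged */
    assert(SKETCH_PAGE_CEILING(4097) == 8192);
    char buf[128];
    void *base = SKETCH_ADDR2BASE(&buf[65], 64);
    assert(((uintptr_t)base & 63) == 0);        /* 64-byte-aligned base */
    return 0;
}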
 

Functions

arena_t * arenas_extend (unsigned ind)
 
void arenas_cleanup (void *arg)
 
arena_t * choose_arena_hard (void)
 
void jemalloc_prefork (void)
 
void jemalloc_postfork_parent (void)
 
void jemalloc_postfork_child (void)
 
 malloc_tsd_externs (arenas, arena_t *) malloc_tsd_funcs (JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
 
JEMALLOC_ALWAYS_INLINE size_t s2u (size_t size)
 
JEMALLOC_ALWAYS_INLINE size_t sa2u (size_t size, size_t alignment)
 
JEMALLOC_INLINE unsigned narenas_total_get (void)
 
JEMALLOC_INLINE arena_t * choose_arena (arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * imalloct (size_t size, bool try_tcache, arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * imalloc (size_t size)
 
JEMALLOC_ALWAYS_INLINE void * icalloct (size_t size, bool try_tcache, arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * icalloc (size_t size)
 
JEMALLOC_ALWAYS_INLINE void * ipalloct (size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * ipalloc (size_t usize, size_t alignment, bool zero)
 
JEMALLOC_ALWAYS_INLINE size_t isalloc (const void *ptr, bool demote)
 
JEMALLOC_ALWAYS_INLINE size_t ivsalloc (const void *ptr, bool demote)
 
JEMALLOC_INLINE size_t u2rz (size_t usize)
 
JEMALLOC_INLINE size_t p2rz (const void *ptr)
 
JEMALLOC_ALWAYS_INLINE void idalloct (void *ptr, bool try_tcache)
 
JEMALLOC_ALWAYS_INLINE void idalloc (void *ptr)
 
JEMALLOC_ALWAYS_INLINE void iqalloct (void *ptr, bool try_tcache)
 
JEMALLOC_ALWAYS_INLINE void iqalloc (void *ptr)
 
JEMALLOC_ALWAYS_INLINE void * iralloct_realign (void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * iralloct (void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
 
JEMALLOC_ALWAYS_INLINE void * iralloc (void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
 
JEMALLOC_ALWAYS_INLINE bool ixalloc (void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
 
 malloc_tsd_externs (thread_allocated, thread_allocated_t) malloc_tsd_funcs (JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
 

Variables

static const bool config_debug
 
static const bool config_dss
 
static const bool config_fill
 
static const bool config_lazy_lock
 
static const bool config_prof
 
static const bool config_prof_libgcc
 
static const bool config_prof_libunwind
 
static const bool config_mremap
 
static const bool config_munmap
 
static const bool config_stats
 
static const bool config_tcache
 
static const bool config_tls
 
static const bool config_utrace
 
static const bool config_valgrind
 
static const bool config_xmalloc
 
static const bool config_ivsalloc
 
bool opt_abort
 
bool opt_junk
 
size_t opt_quarantine
 
bool opt_redzone
 
bool opt_utrace
 
bool opt_valgrind
 
bool opt_xmalloc
 
bool opt_zero
 
size_t opt_narenas
 
unsigned ncpus
 
malloc_mutex_t arenas_lock
 
arena_t ** arenas
 
unsigned narenas_total
 
unsigned narenas_auto
 
 


Function Documentation

void arenas_cleanup ( void *  arg)
arena_t* arenas_extend ( unsigned  ind)
JEMALLOC_INLINE arena_t* choose_arena ( arena_t *  arena)
{
    arena_t *ret;

    /* Use the caller-supplied arena when one is given. */
    if (arena != NULL)
        return (arena);

    /* Otherwise use this thread's cached arena, choosing one on first use. */
    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
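choose_arena() keeps the selected arena in thread-specific data, so the lookup is cheap after a thread's first allocation. A standalone sketch of that lazy caching with plain pthreads (jemalloc's tsd.h generates the equivalent arenas_tsd_get() machinery behind macros; choose_fallback() merely stands in for choose_arena_hard()):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t arena_key;
static pthread_once_t arena_once = PTHREAD_ONCE_INIT;

static void arena_key_make(void) {
    pthread_key_create(&arena_key, free);  /* release cached value at thread exit */
}

static void *choose_fallback(void) {
    return malloc(sizeof(int));            /* placeholder for a real arena */
}

/* Return this thread's cached choice, making one on first use. */
static void *choose_sketch(void) {
    void *v;

    pthread_once(&arena_once, arena_key_make);
    if ((v = pthread_getspecific(arena_key)) == NULL) {
        v = choose_fallback();
        pthread_setspecific(arena_key, v);
    }
    return v;
}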

arena_t* choose_arena_hard ( void  )


JEMALLOC_ALWAYS_INLINE void* icalloc ( size_t  size)
{
    /* Zeroed allocation from an automatically chosen arena, tcache allowed. */
    return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void* icalloct ( size_t  size, bool  try_tcache, arena_t *  arena)
{
    /* Zeroed allocation: requests up to arena_maxclass go to an arena,
     * anything larger is handled by the huge allocator. */
    if (size <= arena_maxclass)
        return (arena_malloc(arena, size, true, try_tcache));
    else
        return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void idalloc ( void *  ptr)
{
    /* Deallocate, tcache allowed. */
    idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void idalloct ( void *  ptr, bool  try_tcache)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    /* A pointer that coincides with its chunk base is a huge allocation;
     * anything interior to a chunk belongs to that chunk's arena. */
    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void* imalloc ( size_t  size)
{
    /* Allocation from an automatically chosen arena, tcache allowed. */
    return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void* imalloct ( size_t  size, bool  try_tcache, arena_t *  arena)
{
    assert(size != 0);

    /* Route by size: arena allocation up to arena_maxclass, huge beyond. */
    if (size <= arena_maxclass)
        return (arena_malloc(arena, size, false, try_tcache));
    else
        return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void* ipalloc ( size_t  usize, size_t  alignment, bool  zero)
{
    /* Aligned allocation from an automatically chosen arena, tcache allowed. */
    return (ipalloct(usize, alignment, zero, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void* ipalloct ( size_t  usize, size_t  alignment, bool  zero, bool  try_tcache, arena_t *  arena)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(arena, usize, zero, try_tcache);
    else {
        if (usize <= arena_maxclass) {
            /* Sub-chunk size but super-page alignment: use arena_palloc(). */
            ret = arena_palloc(choose_arena(arena), usize, alignment, zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
        else
            ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}
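Callers must pass a usize that has already been canonicalized: the second assert in ipalloct() requires usize == sa2u(usize, alignment), and sa2u() returns 0 on size_t overflow. A hypothetical caller sketch (aligned_alloc_sketch() is not part of this header):

/* Canonicalize the request with sa2u() before calling ipalloc(). */
static void *
aligned_alloc_sketch(size_t size, size_t alignment)
{
    size_t usize = sa2u(size, alignment);

    if (usize == 0)
        return (NULL);          /* request overflows size_t */
    return (ipalloc(usize, alignment, false));
}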

JEMALLOC_ALWAYS_INLINE void iqalloc ( void *  ptr)
{
    /* Quarantine-aware deallocation, tcache allowed. */
    iqalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void iqalloct ( void *  ptr, bool  try_tcache)
{
    /* With junk filling and a non-zero quarantine configured, defer the
     * actual deallocation through the quarantine. */
    if (config_fill && opt_quarantine)
        quarantine(ptr);
    else
        idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void* iralloc ( void *  ptr, size_t  size, size_t  extra, size_t  alignment, bool  zero)
{
    /* Reallocate from an automatically chosen arena, tcache allowed. */
    return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void* iralloct ( void *  ptr, size_t  size, size_t  extra, size_t  alignment, bool  zero, bool  try_tcache_alloc, bool  try_tcache_dalloc, arena_t *  arena)
{
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        return (iralloct_realign(ptr, oldsize, size, extra, alignment,
            zero, try_tcache_alloc, try_tcache_dalloc, arena));
    }

    if (size + extra <= arena_maxclass) {
        return (arena_ralloc(arena, ptr, oldsize, size, extra,
            alignment, zero, try_tcache_alloc, try_tcache_dalloc));
    } else {
        return (huge_ralloc(ptr, oldsize, size, extra,
            alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
    }
}

JEMALLOC_ALWAYS_INLINE void* iralloct_realign ( void *  ptr, size_t  oldsize, size_t  size, size_t  extra, size_t  alignment, bool  zero, bool  try_tcache_alloc, bool  try_tcache_dalloc, arena_t *  arena)
{
    void *p;
    size_t usize, copysize;

    usize = sa2u(size + extra, alignment);
    if (usize == 0)
        return (NULL);
    p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
    if (p == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, without extra this time. */
        usize = sa2u(size, alignment);
        if (usize == 0)
            return (NULL);
        p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
        if (p == NULL)
            return (NULL);
    }
    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(p, ptr, copysize);
    iqalloct(ptr, try_tcache_dalloc);
    return (p);
}

JEMALLOC_ALWAYS_INLINE size_t isalloc ( const void *  ptr, bool  demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || demote == false);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}
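isalloc(), idalloct(), and ivsalloc() all share one trick: CHUNK_ADDR2BASE() masks a pointer down to its chunk boundary, and only huge allocations are themselves chunk-aligned, so chunk == ptr identifies the huge path. A standalone sketch of the test, assuming 4 MiB chunks (chunksize is build-dependent):

#include <assert.h>
#include <stdint.h>

#define SKETCH_CHUNK ((uintptr_t)4 << 20)  /* assumed 4 MiB chunks */
#define SKETCH_ADDR2BASE(p) ((void *)((uintptr_t)(p) & ~(SKETCH_CHUNK - 1)))

int main(void) {
    void *huge  = (void *)(SKETCH_CHUNK * 2);          /* chunk-aligned */
    void *small = (void *)(SKETCH_CHUNK * 2 + 4096);   /* interior pointer */

    assert(SKETCH_ADDR2BASE(huge) == huge);    /* huge: base equals pointer */
    assert(SKETCH_ADDR2BASE(small) != small);  /* arena-managed: interior */
    return 0;
}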

JEMALLOC_ALWAYS_INLINE size_t ivsalloc ( const void *  ptr, bool  demote)
{
    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_ALWAYS_INLINE bool ixalloc ( void *  ptr, size_t  size, size_t  extra, size_t  alignment, bool  zero)
{
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);
    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
        /* Existing object alignment is inadequate. */
        return (true);
    }

    if (size <= arena_maxclass)
        return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
    else
        return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
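Note the inverted convention: ixalloc() never moves the object and returns true on failure, so a caller typically tries it first and falls back to a moving reallocation. A hypothetical caller sketch (grow_sketch() is not part of this header):

/* Attempt in-place growth; fall back to iralloc(), which may move. */
static void *
grow_sketch(void *ptr, size_t newsize)
{
    if (ixalloc(ptr, newsize, 0, 0, false) == false)
        return (ptr);                               /* resized in place */
    return (iralloc(ptr, newsize, 0, 0, false));    /* allocate and copy */
}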

void jemalloc_postfork_child ( void  )
void jemalloc_postfork_parent ( void  )
void jemalloc_prefork ( void  )
malloc_tsd_externs ( arenas, arena_t * )
malloc_tsd_externs ( thread_allocated, thread_allocated_t )
JEMALLOC_INLINE unsigned narenas_total_get ( void  )
{
    unsigned narenas;

    /* arenas_lock guards the narenas_total counter. */
    malloc_mutex_lock(&arenas_lock);
    narenas = narenas_total;
    malloc_mutex_unlock(&arenas_lock);

    return (narenas);
}
JEMALLOC_INLINE size_t p2rz ( const void *  ptr)
{
    /* Redzone size for an existing allocation: derived from its usable size. */
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE size_t s2u ( size_t  size)
{
    /* Map a request size to the usable size of the class that serves it:
     * small sizes use their bin's reg_size, large sizes round to a page,
     * and huge sizes round to a chunk. */
    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}
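The three tiers of s2u() can be exercised in isolation. A standalone sketch with assumed boundaries (the real values come from size_classes.h and vary per build; real small sizes are also looked up in arena_bin_info rather than returned verbatim):

#include <assert.h>
#include <stddef.h>

#define SK_PAGE   ((size_t)4096)           /* assumed page size */
#define SK_CHUNK  ((size_t)4 << 20)        /* assumed chunk size */
#define SK_SMALL  ((size_t)3584)           /* assumed SMALL_MAXCLASS */
#define SK_ARENA  (SK_CHUNK - SK_PAGE)     /* assumed arena_maxclass */
#define SK_CEIL(s, q) (((s) + ((q) - 1)) & ~((q) - 1))

static size_t s2u_sketch(size_t size) {
    if (size <= SK_SMALL)
        return size;                     /* real code: the bin's reg_size */
    if (size <= SK_ARENA)
        return SK_CEIL(size, SK_PAGE);   /* large: round to a page */
    return SK_CEIL(size, SK_CHUNK);      /* huge: round to a chunk */
}

int main(void) {
    assert(s2u_sketch(5000) == 8192);                  /* two pages */
    assert(s2u_sketch(SK_CHUNK + 1) == 2 * SK_CHUNK);  /* two chunks */
    return 0;
}
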
JEMALLOC_ALWAYS_INLINE size_t sa2u ( size_t  size, size_t  alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size. For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    160 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow. This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}

JEMALLOC_INLINE size_t u2rz ( size_t  usize)
{
    size_t ret;

    /* Only small size classes carry redzones. */
    if (usize <= SMALL_MAXCLASS) {
        size_t binind = SMALL_SIZE2BIN(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

Variable Documentation

arena_t ** arenas
malloc_mutex_t arenas_lock
The config_* flags record jemalloc's compile-time configuration; each is true only when the corresponding feature was enabled at build time, so branches that test them are folded away by the compiler (see the sketch at the end of this page). The values below reflect the documented build:

static const bool config_debug = false
static const bool config_dss = false
static const bool config_fill = false
static const bool config_ivsalloc = false
static const bool config_lazy_lock = false
static const bool config_mremap = false
static const bool config_munmap = false
static const bool config_prof = false
static const bool config_prof_libgcc = false
static const bool config_prof_libunwind = false
static const bool config_stats = false
static const bool config_tcache = false
static const bool config_tls = false
static const bool config_utrace = false
static const bool config_valgrind = false
static const bool config_xmalloc = false
unsigned narenas_auto
unsigned narenas_total
unsigned ncpus
bool opt_abort
bool opt_junk
size_t opt_narenas
size_t opt_quarantine
bool opt_redzone
bool opt_utrace
bool opt_valgrind
bool opt_xmalloc
bool opt_zero
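
Because the config_* flags above are compile-time constants, feature tests such as the one in iqalloct() cost nothing when the feature is disabled: the optimizer deletes the dead branch. A minimal standalone sketch of the idiom (not from this header):

#include <stdbool.h>
#include <stdio.h>

static const bool config_fill = false;  /* normally set by the build system */

static void dalloc_sketch(void *ptr) {
    if (config_fill)
        printf("junk-fill/quarantine path for %p\n", ptr);  /* folded away */
    else
        printf("plain deallocation path for %p\n", ptr);
}

int main(void) {
    int x;
    dalloc_sketch(&x);
    return 0;
}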