#define DEBUG 0
#define STATS 0
#define FORCED_DEBUG 0

#define BYTES_PER_WORD sizeof(void *)
#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))

#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN

#define CREATE_MASK

#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
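
These four values reserve the top of the kmem_bufctl_t range as in-band sentinels: a slab's bufctl array stores, for each object, the index of the next free object, and SLAB_LIMIT caps the object count so a real index can never collide with a marker. A minimal sketch of the idea (simplified; not slab.c's exact layout):

    typedef unsigned int kmem_bufctl_t;

    #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)

    /* Count free objects by following next-free indices until the END
     * sentinel; bufctl[i] holds the index of the object after object i. */
    static unsigned int count_free(const kmem_bufctl_t *bufctl,
                                   kmem_bufctl_t free)
    {
            unsigned int n = 0;

            while (free != BUFCTL_END) {
                    n++;
                    free = bufctl[free];
            }
            return n;
    }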

#define SLAB_OBJ_PFMEMALLOC 1

#define BOOT_CPUCACHE_ENTRIES 1

#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
#define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES)
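
NUM_INIT_LISTS sizes the static array of bootstrap per-node lists, carved into three bands of MAX_NUMNODES entries each: CACHE_CACHE for the cache of caches, SIZE_AC for the arraycache kmalloc cache, and SIZE_L3 for the kmem_list3 kmalloc cache. A sketch of the indexing (the helper name is illustrative):

    /* Bootstrap lists live in one static array, one band per early cache. */
    static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];

    /* index is CACHE_CACHE, SIZE_AC or SIZE_L3; node selects the entry
     * within that band. */
    static struct kmem_list3 *bootstrap_lists(int index, int node)
    {
            return &initkmem_list3[index + node];
    }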

#define CACHE(x)

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

#define MAKE_LIST(cachep, listp, slab, nodeid)
#define MAKE_ALL_LISTS(cachep, ptr, nodeid)

#define CFLGS_OFF_SLAB (0x80000000UL)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT 16

#define REAPTIMEOUT_CPUC (2*HZ)
#define REAPTIMEOUT_LIST3 (4*HZ)

#define STATS_INC_ACTIVE(x) do { } while (0)
#define STATS_DEC_ACTIVE(x) do { } while (0)
#define STATS_INC_ALLOCED(x) do { } while (0)
#define STATS_INC_GROWN(x) do { } while (0)
#define STATS_ADD_REAPED(x, y) do { (void)(y); } while (0)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
#define STATS_INC_ACOVERFLOW(x) do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
#define STATS_INC_FREEMISS(x) do { } while (0)
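
With STATS defined to 0 above, every STATS_* hook compiles to nothing; the do { } while (0) body keeps each macro a single statement that stays safe in any context, e.g. an unbraced if/else. A sketch of the usual two-sided pattern (the STATS=1 body here is illustrative, not copied from slab.c):

    #if STATS
    #define STATS_INC_GROWN(x) ((x)->grown++)       /* real counter bump */
    #else
    #define STATS_INC_GROWN(x) do { } while (0)     /* compiles to nothing */
    #endif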

#define obj_offset(x) 0
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})

#define SLAB_MAX_ORDER_HI 1
#define SLAB_MAX_ORDER_LO 0

#define CACHE(x) { .cs_size = (x) },
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },

#define BAD_ALIEN_MAGIC 0x01020304ul

#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

__kmem_cache_create - Create a cache.
@size: The size of objects to be created in this cache.
@align: The required alignment for the objects.
@flags: SLAB flags
@ctor: A constructor for the objects.

Returns a ptr to the cache on success, NULL on failure.
Cannot be called within an interrupt, but can be interrupted.
The @ctor is run when new pages are allocated by the cache.

The flags are:

SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) to catch references to uninitialised memory.

SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check for buffer overruns.

SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware cacheline. This can be beneficial if you're counting cycles as closely as davem.
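
As a usage sketch, callers normally go through the public kmem_cache_create() wrapper rather than __kmem_cache_create() directly; the object type and names below are illustrative:

    #include <linux/slab.h>

    struct my_obj {                 /* illustrative object type */
            int id;
            char payload[60];
    };

    static struct kmem_cache *my_cachep;

    static int __init my_cache_init(void)
    {
            /* Hardware-cacheline alignment; no constructor. */
            my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
            return my_cachep ? 0 : -ENOMEM;
    }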

#define check_irq_off() do { } while(0)
#define check_irq_on() do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x, objp, z) (objp)
#define check_slabp(x, y) do { } while(0)
#define cache_alloc_debugcheck_after(a, b, objp, d) (objp)

int __kmem_cache_create(struct kmem_cache *cachep, unsigned long flags)

int kmem_cache_shrink(struct kmem_cache *cachep)
EXPORT_SYMBOL(kmem_cache_shrink)

int __kmem_cache_shutdown(struct kmem_cache *cachep)

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc)

void *__kmalloc(size_t size, gfp_t flags)
EXPORT_SYMBOL(__kmalloc)

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
EXPORT_SYMBOL(kmem_cache_free)

void kfree(const void *objp)
EXPORT_SYMBOL(kfree)
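
A sketch pairing these entry points (my_cachep and struct my_obj as in the earlier illustrative example; error handling kept minimal):

    static void demo_alloc(void)
    {
            struct my_obj *obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
            /* kmalloc() normally resolves to __kmalloc() for
             * non-constant sizes. */
            char *buf = __kmalloc(128, GFP_KERNEL);

            if (obj)
                    kmem_cache_free(my_cachep, obj); /* back to its cache */
            if (buf)
                    kfree(buf);                      /* back to kmalloc */
    }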

unsigned int kmem_cache_size(struct kmem_cache *cachep)
EXPORT_SYMBOL(kmem_cache_size)

size_t ksize(const void *objp)
EXPORT_SYMBOL(ksize)
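
ksize() reports the usable size of an allocated block, which may exceed the requested size because kmalloc rounds up to the underlying cache's object size. A small sketch:

    static void demo_ksize(void)
    {
            void *p = kmalloc(30, GFP_KERNEL);

            if (p) {
                    /* Typically prints a rounded-up size, e.g. 32. */
                    pr_info("requested 30, usable %zu\n", ksize(p));
                    kfree(p);
            }
    }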