Linux Kernel 3.7.1
slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
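/*
 * Illustrative example (not part of the original header): when the size is
 * known at compile time, e.g.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 * the inline kmalloc() defined below selects the matching general cache and
 * calls kmem_cache_alloc_trace() on it directly; a size that is only known
 * at run time falls back to __kmalloc(). "struct foo" is a hypothetical
 * caller-side type used only for this sketch.
 */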

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};
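/*
 * Illustrative sketch (not part of the original header): a dedicated cache
 * described by struct kmem_cache is normally set up and used through the
 * generic slab API declared in <linux/slab.h>, roughly:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 *
 * "foo" and struct foo are placeholder names for this sketch; of the calls
 * above only kmem_cache_alloc() is declared in this file, the others come
 * from <linux/slab.h>.
 */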

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
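/*
 * Illustrative note (not part of the original header): malloc_sizes[] has
 * one entry per CACHE(x) line in <linux/kmalloc_sizes.h>, in ascending size
 * order, so an index found by walking that list selects the smallest
 * general cache able to hold a request. The exact set of sizes depends on
 * the architecture (PAGE_SIZE and L1_CACHE_BYTES), roughly:
 *
 *	CACHE(32) CACHE(64) CACHE(128) CACHE(256) ... CACHE(PAGE_SIZE) ...
 */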

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif
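/*
 * Illustrative note (not part of the original header): with CONFIG_TRACING
 * disabled the _trace variant above is a plain forwarding stub, so the extra
 * size argument passed by the inline kmalloc() below is simply dropped; with
 * tracing enabled the out-of-line version can record the originally
 * requested size alongside the cache actually used.
 */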

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
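/*
 * Illustrative expansion (not part of the original header): for a constant
 * size the CACHE(x) chain generated from <linux/kmalloc_sizes.h> unrolls to
 * something like
 *
 *	if (size <= 32) goto found; else i++;
 *	if (size <= 64) goto found; else i++;
 *	if (size <= 128) goto found; else i++;
 *	...
 *
 * which the compiler folds to a fixed index, so e.g. kmalloc(100, GFP_KERNEL)
 * becomes a direct kmem_cache_alloc_trace() on the 128-byte general cache.
 * A constant size larger than the biggest CACHE(x) entry returns NULL here
 * rather than falling back to __kmalloc().
 */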

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
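/*
 * Illustrative example (not part of the original header): kmalloc_node()
 * behaves like the constant-size kmalloc() above but hands the requested
 * NUMA node to the allocator, e.g. to keep per-device data close to the
 * device (dev below is a hypothetical struct device pointer):
 *
 *	buf = kmalloc_node(sizeof(*buf), GFP_KERNEL, dev_to_node(dev));
 */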

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */