#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>
/*
 * A slob index must be able to express any offset within a page, so use
 * the smallest signed type that fits.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif
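/*
 * Minimal sketch of the per-unit block header the routines below operate
 * on, assumed to match the upstream slob.c layout: the heap is carved into
 * SLOB_UNIT-sized pieces, and free blocks record sizes and next-offsets in
 * these units.
 */
struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;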
/*
 * Size break points that route an allocation to the small, medium or large
 * list of partially-free pages.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
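/*
 * The three lists of partially-free pages and the lock protecting them,
 * declared here as a sketch so that the references in slob_alloc() and
 * slob_free() below resolve; the names follow the upstream file.
 */
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
static DEFINE_SPINLOCK(slob_lock);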
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}
static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}
static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
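/*
 * Sketch of the free-block encoding helpers used throughout the routines
 * below, following the upstream slob.c scheme: a free block larger than
 * one unit keeps its size in the first unit and the offset of the next
 * free block in the second; a one-unit free block stores the negated
 * offset instead. Treat this as an illustrative reimplementation rather
 * than the exact source.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}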
/* Returns true if s is the last free block in its page. */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	struct page *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	return page_address(page);
}
static void slob_free_pages(void *b, int order)
{
	free_pages((unsigned long)b, order);
}
/* Allocate a slob block within the partially-free page sp. */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
/* slob_alloc: entry point into the slob allocator. */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room. */
	list_for_each_entry(sp, slob_list, list) {
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Start the next search here to improve fragment distribution. */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page. */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* The page becomes entirely free: return it to the page allocator. */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		reset_page_mapcount(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find the
	 * reinsertion point and merge with adjacent free blocks.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		/* Reserve room for a size header, then hand out the aligned tail. */
		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		/* Large request: go straight to the page allocator. */
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	return ret;
}
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);
#ifdef CONFIG_TRACING
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif /* CONFIG_TRACING */
		/*
		 * Inside kfree(), for an object that came from slob_alloc():
		 * step back over the alignment padding to the stored size
		 * header, then free header and object together.
		 */
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
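/*
 * Layout implied by the kmalloc paths above (a sketch, not copied from the
 * source): for a small kmalloc(size), slob reserves a header in front of
 * the object and returns the aligned address after it,
 *
 *	m                       ret = (void *)m + align
 *	|<-------- align ------>|<--------- size --------->|
 *	[ size header | padding ][ object ................. ]
 *
 * which is why kfree() and ksize() step back by `align` to recover the
 * original request size from *m.
 */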
		/* Inside ksize(): step back to the same size header. */
		unsigned int *m = (unsigned int *)(block - align);
	/* In __kmem_cache_create(): the cache's alignment is raised to at
	 * least `align`. */
	size_t align = c->size;

	if (c->align < align)
		c->align = align;
	/* In the cache allocation path (kmem_cache_alloc_node()): carve the
	 * object out of the slob lists, then report it to kmemleak. */
	b = slob_alloc(c->size, flags, c->align, node);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}
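/*
 * Footer used by SLAB_DESTROY_BY_RCU caches, assumed to match the upstream
 * definition: __kmem_cache_create() leaves room for it at the end of each
 * object, and kmem_cache_free() uses it to defer the real free until after
 * a grace period.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};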
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);
struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};