#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/types.h>
/* A zspage is chained from up to ZS_MAX_PAGES_PER_ZSPAGE 0-order pages. */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will
 * just be PAGE_SHIFT.
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS	(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * A single (unsigned long) handle encodes an object's location as
 * <PFN, obj_idx>; these constants size and mask the obj_idx field.
 */
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
#define MAX(a, b) ((a) >= (b) ? (a) : (b))

#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
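/*
 * Worked example (editorial, assuming 4K pages): with
 * MAX_PHYSMEM_BITS == BITS_PER_LONG, OBJ_INDEX_BITS is PAGE_SHIFT (12),
 * so (4 pages << 12 >> 12) == 4 and ZS_MIN_ALLOC_SIZE == 32. Under
 * CONFIG_HIGHMEM64G (MAX_PHYSMEM_BITS == 36 on 32-bit), OBJ_INDEX_BITS
 * drops to 8, the second operand becomes (16384 >> 8) == 64, and
 * ZS_MIN_ALLOC_SIZE rises to 64.
 */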
/* Size classes are spaced ZS_SIZE_CLASS_DELTA bytes apart. */
#define ZS_SIZE_CLASS_DELTA	16
#define ZS_SIZE_CLASSES	((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
				ZS_SIZE_CLASS_DELTA + 1)
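/*
 * Worked example (editorial): with 4K pages and ZS_MIN_ALLOC_SIZE == 32,
 * ZS_SIZE_CLASSES == (4096 - 32) / 16 + 1 == 255, i.e. classes of size
 * 32, 48, 64, ..., 4096 bytes.
 */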
/*
 * A zspage is classified ZS_ALMOST_EMPTY when the number of allocated
 * objects n satisfies n <= N / fullness_threshold_frac, where N is the
 * total number of objects the zspage can store.
 */
static const int fullness_threshold_frac = 4;
/*
 * The class index and fullness group of a zspage are encoded in the
 * page->mapping field of its first page.
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
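/*
 * Editorial sketch of that encoding, treating page->mapping as a scalar:
 *
 *	bits 31..4: class_idx		bits 3..0: fullness group
 *
 * so decoding is simply
 *	fullness  =  m & FULLNESS_MASK;
 *	class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
 */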
/*
 * On architectures where mapping a two-page object through page table
 * entries beats copying it (e.g. ARM), use page table mapping.
 */
#if defined(CONFIG_ARM)
#define USE_PGTABLE_MAPPING
#endif

/* per-cpu area used to map objects that span two pages */
struct mapping_area {
#ifdef USE_PGTABLE_MAPPING
	struct vm_struct *vm;	/* vm area for mapping objects that span pages */
#else
	char *vm_buf;		/* copy buffer for objects that span pages */
#endif
	char *vm_addr;		/* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm;	/* mapping mode */
};

static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
/* The first page of a zspage has PG_private set. */
static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

/* The last page of a zspage has PG_private_2 set. */
static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));
	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}
static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	BUG_ON(!is_first_page(page));
	page->mapping = (struct address_space *)(unsigned long)
			((class_idx << FULLNESS_BITS) | fullness);
}
/* Map an allocation size to an index into the pool's size_class array. */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);
	return idx;
}
/* Classify a zspage by how full it is. */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;

	BUG_ON(!is_first_page(page));
	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		return ZS_EMPTY;
	else if (inuse == max_objects)
		return ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}
/* Add a zspage to the head of the given class's fullness list. */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));
	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);
	*head = page;
}
/* Remove a zspage from its class's fullness list. */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));
	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}
/* Move a zspage between fullness lists when its occupancy changes. */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));
	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		return newfg;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);
	return newfg;
}
/*
 * Pick the number of 0-order pages to chain into a zspage of this class
 * so that waste from the trailing partial object is minimized.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
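/*
 * Worked example (editorial, 4K pages): for a hypothetical class size of
 * 2720 bytes, one page wastes 1376 bytes (66% used) while two pages waste
 * only 32 bytes (99% used); three and four pages do no better, so
 * get_pages_per_zspage(2720) returns 2.
 */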
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}
static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/* Encode <page, obj_idx> as a single opaque handle value. */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);
	return (void *)handle;
}
/* Decode a handle back into <page, obj_idx>. */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = handle & OBJ_INDEX_MASK;
}
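/*
 * Editorial sketch of the handle layout on a 64-bit machine with 4K pages
 * and MAX_PHYSMEM_BITS == BITS_PER_LONG (so OBJ_INDEX_BITS == 12):
 *
 *	63                12 11           0
 *	+-------------------+-------------+
 *	|        pfn        |   obj_idx   |
 *	+-------------------+-------------+
 */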
static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	/* page->index holds the offset of the page's first object */
	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	reset_page_mapcount(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only one system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a freshly allocated zspage: build its object freelist. */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores the offset of the first object
		 * starting in this page; it is always 0 for the first
		 * page, whose ->freelist stores the freelist head instead.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		/* chain together all whole objects within this page */
		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * The last (full or partial) object on this page points
		 * to the first object on the next page (if any).
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off %= PAGE_SIZE;
	}
}
/*
 * Allocate a zspage from individual 0-order pages, linked as follows:
 * 1. the first page's ->private points to the first sub-page,
 * 2. all sub-pages are chained through page->lru,
 * 3. each sub-page's ->first_page points back to the first page.
 * PG_private marks the first page, PG_private_2 the last.
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* maximum number of objects this zspage can store */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0;	/* success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}
	return first_page;
}
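/*
 * Editorial sketch: for pages_per_zspage == 3 the resulting chain is
 *
 *	first (PG_private) --private--> page1 --lru--> page2 (PG_private_2)
 *
 * and every sub-page's ->first_page points back at the first page.
 */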
/* Find a zspage with free space, searching fullness groups in order. */
static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}
	return page;
}
#ifdef USE_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/* avoid a leak if a CPU-up notification races with zs_init() */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}
#else /* !USE_PGTABLE_MAPPING: copy through a per-cpu buffer instead */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/* avoid a leak if a CPU-up notification races with zs_init() */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath for write-only mappings */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy both halves of the object into the per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath for read-only mappings */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy the per-cpu buffer back into the two pages */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* USE_PGTABLE_MAPPING */
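/*
 * Worked example (editorial, 4K pages): a 300-byte object at offset 3900
 * splits as sizes[0] = 4096 - 3900 = 196 bytes from the first page and
 * sizes[1] = 300 - 196 = 104 bytes from the second.
 */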
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};
static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}
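/*
 * Hedged usage sketch (editorial, not part of this file): how a zsmalloc
 * client such as zram typically drives the pool API. The pool name and
 * GFP flags are illustrative; zs_create_pool()'s exact signature has
 * varied across kernel versions (some take only gfp flags).
 */
#if 0	/* example only */
static int example_user(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *obj;

	pool = zs_create_pool("example", GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 128);	/* opaque handle, not a pointer */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	obj = zs_map_object(pool, handle, ZS_MM_WO);	/* pin + get VA */
	memset(obj, 0, 128);
	zs_unmap_object(pool, handle);	/* must unmap before freeing */

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
#endif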
	/*
	 * (from zs_create_pool(): each size class caches the zspage order
	 * chosen by get_pages_per_zspage() for its object size)
	 */
	class->pages_per_zspage = get_pages_per_zspage(size);
	/* (from zs_destroy_pool(): warn if a class still holds zspages) */
	if (class->fullness_list[fg]) {
		pr_info("Freeing non-empty class with size %db, fullness group %d\n",
			class->size, fg);
	}
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;
	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->pages_per_zspage;
	}

	/* pop the first object off the zspage's freelist */
	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
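/*
 * Editorial note: zs_malloc() thus has two paths. The fast path reuses a
 * zspage found on the class's fullness lists; the slow path drops the
 * class lock, allocates and links a fresh zspage, then retakes the lock
 * before carving the first object off the new freelist.
 */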
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* insert this object into the containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->pages_per_zspage;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;
	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);
	/* per-cpu mapping areas can't be used from interrupt context */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within one page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;
	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
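/*
 * Editorial note: this reports whole pages backing the pool, not bytes
 * handed out, so fragmentation inside zspages is invisible here; a
 * client such as zram (at the time) exposed this value as its total
 * memory usage.
 */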