#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
/* local headers supply struct tmem_handle and struct zbudref */
#include "tmem.h"
#include "zbud.h"
#define CHUNK_SHIFT	6
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define CHUNK_MASK	(~(CHUNK_SIZE-1))
#define NCHUNKS		(PAGE_SIZE >> CHUNK_SHIFT)
#define MAX_CHUNK	(NCHUNKS-1)
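/*
 * With 4 KiB pages and CHUNK_SHIFT == 6, a chunk is 64 bytes, NCHUNKS
 * is 64, MAX_CHUNK is 63, and the largest zbud is 63 * 64 = 4032 bytes.
 */

/*
 * The struct zbudpage definition is elided from this listing.  The
 * sketch below is reconstructed from the accessors that follow: it
 * overlays struct page so that all zbud metadata lives in the
 * pageframe's own descriptor (the bitfield widths are assumptions).
 */
struct zbudpage {
	union {
		struct page page;
		struct {
			unsigned long space_for_flags;
			struct {
				unsigned zbud0_size:PAGE_SHIFT;
				unsigned zbud1_size:PAGE_SHIFT;
				unsigned unevictable:2;
			};
			struct list_head budlist;
			struct list_head lru;
		};
	};
};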
static inline void zbudpage_spin_lock(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	while (unlikely(!trylock_page(page)))
		cpu_relax();
}

static inline void zbudpage_spin_unlock(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	unlock_page(page);
}

static inline int zbudpage_spin_trylock(struct zbudpage *zbudpage)
{
	return trylock_page((struct page *)zbudpage);
}

static inline int zbudpage_is_locked(struct zbudpage *zbudpage)
{
	return PageLocked((struct page *)zbudpage);
}
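/*
 * Note: the underlying struct page's lock bit doubles as the zbudpage
 * "spinlock"; since lock_page() can sleep, the helpers above busy-wait
 * on trylock_page() instead, keeping them usable in atomic context.
 */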
static inline void *kmap_zbudpage_atomic(struct zbudpage *zbudpage)
{
	return kmap_atomic((struct page *)zbudpage);
}
/* "dying" is tracked in a spare pageframe flag bit (exact bit assumed) */
static inline int zbudpage_is_dying(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	return test_bit(PG_reclaim, &page->flags);
}

static inline void zbudpage_set_dying(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	set_bit(PG_reclaim, &page->flags);
}

static inline void zbudpage_clear_dying(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	clear_bit(PG_reclaim, &page->flags);
}
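/*
 * Note: "dying" marks an ephemeral zbudpage whose eviction has begun;
 * its contents are invalid, and the read/write/delete paths below all
 * check the flag and silently ignore such pages.
 */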
/* "zombie" is tracked in another spare flag bit (exact bit assumed) */
static inline int zbudpage_is_zombie(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	return test_bit(PG_dirty, &page->flags);
}

static inline void zbudpage_set_zombie(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	set_bit(PG_dirty, &page->flags);
}

static inline void zbudpage_clear_zombie(struct zbudpage *zbudpage)
{
	struct page *page = (struct page *)zbudpage;

	clear_bit(PG_dirty, &page->flags);
}
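/*
 * Note: "zombie" marks a zbudpage whose contents have been copied out
 * by zbud_make_zombie_lru() but whose zbuds have not yet been freed;
 * it is dropped from the buddied/unbuddied lists but still counted.
 */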
static inline void kunmap_zbudpage_atomic(void *zbpg)
{
	kunmap_atomic(zbpg);
}
static inline struct zbudpage *zbudref_to_zbudpage(struct zbudref *zref)
{
	unsigned long zbud = (unsigned long)zref;

	zbud &= ~1UL;	/* strip the budnum bit */
	return (struct zbudpage *)zbud;
}

static inline struct zbudref *zbudpage_to_zbudref(struct zbudpage *zbudpage,
						  unsigned budnum)
{
	unsigned long zbud = (unsigned long)zbudpage;

	BUG_ON(zbud & 1UL);
	zbud |= budnum;
	return (struct zbudref *)zbud;
}

static inline int zbudref_budnum(struct zbudref *zbudref)
{
	unsigned long zbud = (unsigned long)zbudref;

	return zbud & 1UL;
}
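/*
 * Note: a zbudref packs a zbudpage pointer and a buddy number (0 or 1)
 * into one word by stealing the pointer's low bit, which is always
 * clear because struct page is word-aligned.
 */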
static inline unsigned zbud_max_size(void)
{
	return MAX_CHUNK << CHUNK_SHIFT;
}

static inline unsigned zbud_size_to_chunks(unsigned size)
{
	BUG_ON(size == 0 || size > zbud_max_size());
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
/* caller must hold the zbudpage lock */
static inline char *zbud_data(void *zbpg,
			unsigned budnum, unsigned size)
{
	char *p;

	BUG_ON(size == 0 || size > zbud_max_size());
	p = (char *)zbpg;
	if (budnum == 1)
		p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
	return p;
}
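/*
 * Note: buddy 0 always starts at the beginning of the pageframe, while
 * buddy 1 is placed chunk-aligned against the end, so the two grow
 * toward each other and any slack chunks sit in the middle.
 */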
static unsigned long zbud_eph_pageframes;
static unsigned long zbud_pers_pageframes;
static unsigned long zbud_eph_zpages;
static unsigned long zbud_pers_zpages;
static u64 zbud_eph_zbytes;
static u64 zbud_pers_zbytes;
static unsigned long zbud_eph_evicted_pageframes;
static unsigned long zbud_pers_evicted_pageframes;
static unsigned long zbud_eph_cumul_zpages;
static unsigned long zbud_pers_cumul_zpages;
static u64 zbud_eph_cumul_zbytes;
static u64 zbud_pers_cumul_zbytes;
static unsigned long zbud_eph_cumul_chunk_counts[NCHUNKS];
static unsigned long zbud_pers_cumul_chunk_counts[NCHUNKS];
static unsigned long zbud_eph_buddied_count;
static unsigned long zbud_pers_buddied_count;
static unsigned long zbud_eph_unbuddied_count;
static unsigned long zbud_pers_unbuddied_count;
static unsigned long zbud_eph_zombie_count;
static unsigned long zbud_pers_zombie_count;
static atomic_t zbud_eph_zombie_atomic;
static atomic_t zbud_pers_zombie_atomic;
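/*
 * Note: "eph" counters cover ephemeral (cleancache-style) data and
 * "pers" counters persistent (frontswap-style) data; the "cumul"
 * variants are lifetime totals, the others reflect current state.
 */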
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

#define	zdfs	debugfs_create_size_t
#define	zdfs64	debugfs_create_u64

static int zbud_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir("zbud", NULL);

	if (root == NULL)
		return -ENXIO;
	zdfs64("eph_zbytes", S_IRUGO, root, &zbud_eph_zbytes);
	zdfs64("eph_cumul_zbytes", S_IRUGO, root, &zbud_eph_cumul_zbytes);
	zdfs64("pers_zbytes", S_IRUGO, root, &zbud_pers_zbytes);
	zdfs64("pers_cumul_zbytes", S_IRUGO, root, &zbud_pers_cumul_zbytes);
	zdfs("eph_cumul_zpages", S_IRUGO, root, &zbud_eph_cumul_zpages);
	zdfs("eph_evicted_pageframes", S_IRUGO, root,
				&zbud_eph_evicted_pageframes);
	zdfs("eph_zpages", S_IRUGO, root, &zbud_eph_zpages);
	zdfs("eph_pageframes", S_IRUGO, root, &zbud_eph_pageframes);
	zdfs("eph_buddied_count", S_IRUGO, root, &zbud_eph_buddied_count);
	zdfs("eph_unbuddied_count", S_IRUGO, root, &zbud_eph_unbuddied_count);
	zdfs("pers_cumul_zpages", S_IRUGO, root, &zbud_pers_cumul_zpages);
	zdfs("pers_evicted_pageframes", S_IRUGO, root,
				&zbud_pers_evicted_pageframes);
	zdfs("pers_zpages", S_IRUGO, root, &zbud_pers_zpages);
	zdfs("pers_pageframes", S_IRUGO, root, &zbud_pers_pageframes);
	zdfs("pers_buddied_count", S_IRUGO, root, &zbud_pers_buddied_count);
	zdfs("pers_unbuddied_count", S_IRUGO, root, &zbud_pers_unbuddied_count);
	zdfs("pers_zombie_count", S_IRUGO, root, &zbud_pers_zombie_count);
	return 0;
}
#endif
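/*
 * Note: with debugfs mounted at the usual location, these counters
 * appear as read-only files under /sys/kernel/debug/zbud/, matching
 * the directory name created above.
 */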
/*
 * The companion declarations for the lock, unbuddied, LRU, and zombie
 * lists are elided from this listing; they are restored here because
 * the routines below reference them by name.
 */
static DEFINE_SPINLOCK(zbud_eph_lists_lock);
static DEFINE_SPINLOCK(zbud_pers_lists_lock);

struct zbud_unbuddied {
	struct list_head list;
	unsigned count;
};

/* list N contains pages with N chunks used and NCHUNKS-N unused */
static struct zbud_unbuddied zbud_eph_unbuddied[NCHUNKS];
static struct zbud_unbuddied zbud_pers_unbuddied[NCHUNKS];
static LIST_HEAD(zbud_eph_lru_list);
static LIST_HEAD(zbud_pers_lru_list);
static LIST_HEAD(zbud_eph_buddied_list);
static LIST_HEAD(zbud_pers_buddied_list);
static LIST_HEAD(zbud_eph_zombie_list);
static LIST_HEAD(zbud_pers_zombie_list);
static inline struct zbudpage *zbud_init_zbudpage(struct page *page, bool eph)
{
	struct zbudpage *zbudpage = (struct zbudpage *)page;

	BUG_ON(page == NULL);
	INIT_LIST_HEAD(&zbudpage->budlist);
	INIT_LIST_HEAD(&zbudpage->lru);
	zbudpage->zbud0_size = 0;
	zbudpage->zbud1_size = 0;
	zbudpage->unevictable = 0;
	if (eph)
		zbud_eph_pageframes++;
	else
		zbud_pers_pageframes++;
	return zbudpage;
}
static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
					       bool eph)
{
	struct page *page = (struct page *)zbudpage;

	BUG_ON(!list_empty(&zbudpage->budlist));
	BUG_ON(!list_empty(&zbudpage->lru));
	BUG_ON(!PageLocked(page));
	BUG_ON(zbudpage->zbud0_size != 0);
	BUG_ON(zbudpage->zbud1_size != 0);
	BUG_ON(zbudpage_is_dying(zbudpage));
	BUG_ON(zbudpage_is_zombie(zbudpage));
	if (eph)
		zbud_eph_pageframes--;
	else
		zbud_pers_pageframes--;
	zbudpage_spin_unlock(zbudpage);
	reset_page_mapcount(page);
	init_page_count(page);
	return page;
}
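/*
 * Note: reset_page_mapcount() and init_page_count() return the
 * pageframe to a freshly-allocated state so the caller can safely
 * free it or reuse it for a new zbudpage.
 */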
static inline void zbud_unuse_zbud(struct zbudpage *zbudpage,
				   int budnum, bool eph)
{
	unsigned size;

	BUG_ON(!zbudpage_is_locked(zbudpage));
	if (budnum == 0) {
		size = zbudpage->zbud0_size;
		zbudpage->zbud0_size = 0;
	} else {
		size = zbudpage->zbud1_size;
		zbudpage->zbud1_size = 0;
	}
	if (eph) {
		zbud_eph_zbytes -= size;
		zbud_eph_zpages--;
	} else {
		zbud_pers_zbytes -= size;
		zbud_pers_zpages--;
	}
}
static void zbud_init_zbud(struct zbudpage *zbudpage, struct tmem_handle *th,
			   bool eph, void *cdata,
			   unsigned budnum, unsigned size)
{
	char *to;
	void *zbpg;
	struct tmem_handle *to_th;
	unsigned nchunks = zbud_size_to_chunks(size);

	BUG_ON(!zbudpage_is_locked(zbudpage));
	zbpg = kmap_zbudpage_atomic(zbudpage);
	to = zbud_data(zbpg, budnum, size);
	/* the tmem handle is stored in-line, ahead of the compressed data */
	to_th = (struct tmem_handle *)to;
	*to_th = *th;
	to += sizeof(struct tmem_handle);
	if (cdata != NULL)
		memcpy(to, cdata, size - sizeof(struct tmem_handle));
	kunmap_zbudpage_atomic(zbpg);
	if (budnum == 0)
		zbudpage->zbud0_size = size;
	else
		zbudpage->zbud1_size = size;
	if (eph) {
		zbud_eph_cumul_chunk_counts[nchunks]++;
		zbud_eph_zpages++;
		zbud_eph_cumul_zpages++;
		zbud_eph_zbytes += size;
		zbud_eph_cumul_zbytes += size;
	} else {
		zbud_pers_cumul_chunk_counts[nchunks]++;
		zbud_pers_zpages++;
		zbud_pers_cumul_zpages++;
		zbud_pers_zbytes += size;
		zbud_pers_cumul_zbytes += size;
	}
}
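/*
 * Note: every zbud stores its tmem handle in-line ahead of the
 * compressed data, so eviction can later discover which tmem
 * pool/object/index the data belongs to without an external lookup.
 */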
/*
 * Given a locked, dying zbudpage: read back the tmem handle stored in
 * each zbud, unlock the page, then use the handles to tell tmem to
 * flush its references to the evicted data.  (The pool lookup helpers
 * used in the final loop are assumed from the surrounding driver.)
 */
static void zbud_evict_tmem(struct zbudpage *zbudpage)
{
	int i, j;
	uint32_t pool_id[2], client_id[2], index[2];
	struct tmem_oid oid[2];
	struct tmem_pool *pool;
	void *zbpg;
	struct tmem_handle *th;
	unsigned size;

	/* read out the tmem handles from the data, setting aside copies */
	zbpg = kmap_zbudpage_atomic(zbudpage);
	for (i = 0, j = 0; i < 2; i++) {
		size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
		if (size) {
			th = (struct tmem_handle *)zbud_data(zbpg, i, size);
			client_id[j] = th->client_id;
			pool_id[j] = th->pool_id;
			oid[j] = th->oid;
			index[j] = th->index;
			j++;
			zbud_unuse_zbud(zbudpage, i, true);
		}
	}
	kunmap_zbudpage_atomic(zbpg);
	zbudpage_spin_unlock(zbudpage);
	/* the page is now unlocked but dying; flush the stale tmem entries */
	for (i = 0; i < j; i++) {
		pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
		if (pool != NULL) {
			tmem_flush_page(pool, &oid[i], index[i]);
			zcache_put_pool(pool);
		}
	}
}
/*
 * Return the maximum size of compressed payload that can be stored,
 * quietly setting aside room for the in-line tmem handle.
 */
unsigned int zbud_max_buddy_size(void)
{
	return zbud_max_size() - sizeof(struct tmem_handle);
}
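/*
 * Note: with 4 KiB pages, zbud_max_size() is 4032 bytes, so callers
 * must reject anything larger than that minus the in-line handle;
 * zbud_create_prep() below performs exactly this check.
 */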
/*
 * Given a zbudref, remove the corresponding zbud from all lists and
 * mark it unused.  If its buddy is also unused, the caller gets the
 * pageframe back (non-NULL return) and must free it.
 */
struct page *zbud_free_and_delist(struct zbudref *zref, bool eph,
				  unsigned int *zsize, unsigned int *zpages)
{
	unsigned long budnum = zbudref_budnum(zref);
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	struct page *page = NULL;
	unsigned chunks, bud_size, other_bud_size;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
	struct zbud_unbuddied *unbud =
		eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	if (zbudpage_is_dying(zbudpage)) {
		/* ignore dying zbudpage; see zbud_evict_pageframe_lru() */
		zbudpage_spin_unlock(zbudpage);
		spin_unlock(lists_lock);
		*zsize = 0;
		*zpages = 0;
		goto out;
	}
	if (budnum == 0) {
		bud_size = zbudpage->zbud0_size;
		other_bud_size = zbudpage->zbud1_size;
	} else {
		bud_size = zbudpage->zbud1_size;
		other_bud_size = zbudpage->zbud0_size;
	}
	*zsize = bud_size - sizeof(struct tmem_handle);
	*zpages = 1;
	zbud_unuse_zbud(zbudpage, budnum, eph);
	if (other_bud_size == 0) {	/* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(bud_size);
		if (zbudpage_is_zombie(zbudpage)) {
			if (eph)
				zbud_eph_zombie_count =
				    atomic_dec_return(&zbud_eph_zombie_atomic);
			else
				zbud_pers_zombie_count =
				    atomic_dec_return(&zbud_pers_zombie_atomic);
			zbudpage_clear_zombie(zbudpage);
		} else {
			list_del_init(&zbudpage->budlist);
			unbud[chunks].count--;
		}
		list_del_init(&zbudpage->lru);
		spin_unlock(lists_lock);
		if (eph)
			zbud_eph_unbuddied_count--;
		else
			zbud_pers_unbuddied_count--;
		page = zbud_unuse_zbudpage(zbudpage, eph);
	} else {	/* was buddied: move the remaining buddy to unbuddied */
		chunks = zbud_size_to_chunks(other_bud_size);
		if (!zbudpage_is_zombie(zbudpage)) {
			list_del_init(&zbudpage->budlist);
			list_add_tail(&zbudpage->budlist, &unbud[chunks].list);
			unbud[chunks].count++;
		}
		if (eph) {
			zbud_eph_buddied_count--;
			zbud_eph_unbuddied_count++;
		} else {
			zbud_pers_unbuddied_count++;
			zbud_pers_buddied_count--;
		}
		/* no need to move it on the lru */
		zbudpage_spin_unlock(zbudpage);
		spin_unlock(lists_lock);
	}
out:
	return page;
}
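/*
 * Note: the dying check at the top is the eviction race guard: once
 * zbud_evict_pageframe_lru() marks a page dying, a concurrent free
 * returns with *zpages == 0 and leaves reclaim to the evictor.
 */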
/*
 * Given a tmem handle and a kmapped pointer to compressed data of the
 * given size, try to find an unbuddied zbudpage in which to create a
 * zbud.  If found, put the data there, mark the zbudpage unevictable,
 * and return a zbudref to it; else return NULL.
 */
struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
				void *cdata, unsigned size)
{
	struct zbudpage *zbudpage = NULL, *zbudpage2;
	unsigned long budnum = 0UL;
	unsigned nchunks;
	int i, found_good_buddy = 0;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
	struct zbud_unbuddied *unbud =
		eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;

	size += sizeof(struct tmem_handle);
	nchunks = zbud_size_to_chunks(size);
	for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
		spin_lock(lists_lock);
		if (!list_empty(&unbud[i].list)) {
			list_for_each_entry_safe(zbudpage, zbudpage2,
						 &unbud[i].list, budlist) {
				if (zbudpage_spin_trylock(zbudpage)) {
					found_good_buddy = i;
					goto found_unbuddied;
				}
			}
		}
		spin_unlock(lists_lock);
	}
	zbudpage = NULL;
	goto out;

found_unbuddied:
	BUG_ON(!zbudpage_is_locked(zbudpage));
	BUG_ON(!((zbudpage->zbud0_size == 0) ^ (zbudpage->zbud1_size == 0)));
	if (zbudpage->zbud0_size == 0)
		budnum = 0UL;
	else
		budnum = 1UL;
	list_del_init(&zbudpage->budlist);
	if (eph) {
		list_add_tail(&zbudpage->budlist, &zbud_eph_buddied_list);
		unbud[found_good_buddy].count--;
		zbud_eph_unbuddied_count--;
		zbud_eph_buddied_count++;
		/* "promote" the zbudpage to most-recently-used */
		list_del_init(&zbudpage->lru);
		list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
	} else {
		list_add_tail(&zbudpage->budlist, &zbud_pers_buddied_list);
		unbud[found_good_buddy].count--;
		zbud_pers_unbuddied_count--;
		zbud_pers_buddied_count++;
		/* "promote" the zbudpage to most-recently-used */
		list_del_init(&zbudpage->lru);
		list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
	}
	zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
	zbudpage->unevictable++;
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
out:
	return zbudpage_to_zbudref(zbudpage, budnum);
}
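/*
 * Note: the search walks unbud[] from MAX_CHUNK - nchunks + 1 downward,
 * i.e. from the fullest page that can still hold the new zbud toward
 * emptier ones, a best-fit policy that minimizes wasted chunks.
 */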
/*
 * Given a tmem handle, a kmapped pointer to compressed data of the
 * given size, and a newly allocated pageframe, create an unevictable
 * zbud in that page and return a zbudref to it.
 */
struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
				 void *cdata, unsigned size,
				 struct page *newpage)
{
	struct zbudpage *zbudpage;
	unsigned long budnum = 0;
	unsigned nchunks;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
	struct zbud_unbuddied *unbud =
		eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;

#if 0
	/* this may be worth it later to support decompress-in-place? */
	static unsigned long counter;
	budnum = counter++ & 1;	/* alternate using zbud0 and zbud1 */
#endif

	if (size > zbud_max_buddy_size())
		return NULL;
	if (newpage == NULL)
		return NULL;

	size += sizeof(struct tmem_handle);
	nchunks = zbud_size_to_chunks(size);
	spin_lock(lists_lock);
	zbudpage = zbud_init_zbudpage(newpage, eph);
	zbudpage_spin_lock(zbudpage);
	list_add_tail(&zbudpage->budlist, &unbud[nchunks].list);
	if (eph) {
		list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
		zbud_eph_unbuddied_count++;
	} else {
		list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
		zbud_pers_unbuddied_count++;
	}
	unbud[nchunks].count++;
	zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
	zbudpage->unevictable++;
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
	return zbudpage_to_zbudref(zbudpage, budnum);
}
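/*
 * Note: both zbud_match_prep() and zbud_create_prep() return with the
 * new zbud marked unevictable; zbud_create_finish() below clears the
 * mark once the caller's tmem bookkeeping is complete.
 */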
/*
 * Finish creation of a zbud by marking it evictable again.
 */
void zbud_create_finish(struct zbudref *zref, bool eph)
{
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	BUG_ON(zbudpage_is_dying(zbudpage));
	zbudpage->unevictable--;
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
}
/*
 * Given a zbudref and a struct page, decompress the zbud's payload
 * into the physical page via the caller-provided decompress routine.
 */
int zbud_decompress(struct page *data_page, struct zbudref *zref, bool eph,
		    void (*decompress)(char *, unsigned int, char *))
{
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	unsigned long budnum = zbudref_budnum(zref);
	void *zbpg;
	char *to_va, *from_va;
	unsigned size;
	int ret = -1;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	if (zbudpage_is_dying(zbudpage)) {
		/* ignore dying zbudpage; see zbud_evict_pageframe_lru() */
		goto out;
	}
	zbpg = kmap_zbudpage_atomic(zbudpage);
	to_va = kmap_atomic(data_page);
	if (budnum == 0)
		size = zbudpage->zbud0_size;
	else
		size = zbudpage->zbud1_size;
	BUG_ON(size == 0 || size > zbud_max_size());
	from_va = zbud_data(zbpg, budnum, size);
	from_va += sizeof(struct tmem_handle);
	size -= sizeof(struct tmem_handle);
	decompress(from_va, size, to_va);
	kunmap_atomic(to_va);
	kunmap_zbudpage_atomic(zbpg);
	ret = 0;
out:
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
	return ret;
}
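/*
 * Note: the decompress callback receives (src, src_len, dst) and is
 * expected to produce a full page of output; presumably the caller
 * passes its compression backend's decompress wrapper here.
 */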
/*
 * Given a zbudref and a kernel pointer, copy data from the zbud to
 * the kernel pointer and report its size.
 */
int zbud_copy_from_zbud(char *to_va, struct zbudref *zref,
			size_t *sizep, bool eph)
{
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	unsigned long budnum = zbudref_budnum(zref);
	void *zbpg;
	char *from_va;
	unsigned size;
	int ret = -1;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	if (zbudpage_is_dying(zbudpage)) {
		/* ignore dying zbudpage; see zbud_evict_pageframe_lru() */
		goto out;
	}
	zbpg = kmap_zbudpage_atomic(zbudpage);
	if (budnum == 0)
		size = zbudpage->zbud0_size;
	else
		size = zbudpage->zbud1_size;
	BUG_ON(size == 0 || size > zbud_max_size());
	from_va = zbud_data(zbpg, budnum, size);
	from_va += sizeof(struct tmem_handle);
	size -= sizeof(struct tmem_handle);
	*sizep = size;
	memcpy(to_va, from_va, size);
	kunmap_zbudpage_atomic(zbpg);
	ret = 0;
out:
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
	return ret;
}
/*
 * Given a zbudref and a kernel pointer, copy data from the kernel
 * pointer into the zbud.
 */
int zbud_copy_to_zbud(struct zbudref *zref, char *from_va, bool eph)
{
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	unsigned long budnum = zbudref_budnum(zref);
	void *zbpg;
	char *to_va;
	unsigned size;
	int ret = -1;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	if (zbudpage_is_dying(zbudpage)) {
		/* ignore dying zbudpage; see zbud_evict_pageframe_lru() */
		goto out;
	}
	zbpg = kmap_zbudpage_atomic(zbudpage);
	if (budnum == 0)
		size = zbudpage->zbud0_size;
	else
		size = zbudpage->zbud1_size;
	BUG_ON(size == 0 || size > zbud_max_size());
	to_va = zbud_data(zbpg, budnum, size);
	to_va += sizeof(struct tmem_handle);
	size -= sizeof(struct tmem_handle);
	memcpy(to_va, from_va, size);
	kunmap_zbudpage_atomic(zbpg);
	ret = 0;
out:
	zbudpage_spin_unlock(zbudpage);
	spin_unlock(lists_lock);
	return ret;
}
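/*
 * Note: zbud_decompress(), zbud_copy_from_zbud(), and
 * zbud_copy_to_zbud() all take the lists lock plus the per-page lock
 * and bail out on a dying page, so data access never races with
 * pageframe eviction.
 */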
/*
 * Choose an ephemeral LRU zbudpage that is evictable (unlocked and not
 * marked unevictable), flush its tmem references, and return the now
 * unused pageframe along with the amount of compressed data evicted.
 */
struct page *zbud_evict_pageframe_lru(unsigned int *zsize,
				      unsigned int *zpages)
{
	struct zbudpage *zbudpage = NULL, *zbudpage2;
	struct zbud_unbuddied *unbud = zbud_eph_unbuddied;
	struct page *page = NULL;
	bool irqs_disabled = irqs_disabled();

	/*
	 * This can be called with interrupts disabled (via cleancache)
	 * or enabled (via frontswap), so both cases must be handled.
	 */
	if (irqs_disabled)
		spin_lock(&zbud_eph_lists_lock);
	else
		spin_lock_bh(&zbud_eph_lists_lock);
	*zsize = 0;
	*zpages = 0;
	if (list_empty(&zbud_eph_lru_list))
		goto unlock_out;
	list_for_each_entry_safe(zbudpage, zbudpage2,
				 &zbud_eph_lru_list, lru) {
		/* skip a locked zbudpage */
		if (unlikely(!zbudpage_spin_trylock(zbudpage)))
			continue;
		/* skip an unevictable zbudpage */
		if (unlikely(zbudpage->unevictable != 0)) {
			zbudpage_spin_unlock(zbudpage);
			continue;
		}
		goto evict_page;
	}
unlock_out:
	/* no unlocked evictable pages, give up */
	if (irqs_disabled)
		spin_unlock(&zbud_eph_lists_lock);
	else
		spin_unlock_bh(&zbud_eph_lists_lock);
	goto out;

evict_page:
	list_del_init(&zbudpage->budlist);
	list_del_init(&zbudpage->lru);
	zbudpage_set_dying(zbudpage);
	/*
	 * The zbudpage is now "dying"; concurrent attempts to read,
	 * write, or delete data from it will be ignored.
	 */
	if (zbudpage->zbud0_size != 0 && zbudpage->zbud1_size != 0) {
		*zsize = zbudpage->zbud0_size + zbudpage->zbud1_size -
				(2 * sizeof(struct tmem_handle));
		*zpages = 2;
	} else if (zbudpage->zbud0_size != 0) {
		unbud[zbud_size_to_chunks(zbudpage->zbud0_size)].count--;
		*zsize = zbudpage->zbud0_size - sizeof(struct tmem_handle);
		*zpages = 1;
	} else if (zbudpage->zbud1_size != 0) {
		unbud[zbud_size_to_chunks(zbudpage->zbud1_size)].count--;
		*zsize = zbudpage->zbud1_size - sizeof(struct tmem_handle);
		*zpages = 1;
	} else {
		BUG();
	}
	spin_unlock(&zbud_eph_lists_lock);
	zbud_eph_evicted_pageframes++;
	if (*zpages == 1)
		zbud_eph_unbuddied_count--;
	else
		zbud_eph_buddied_count--;
	zbud_evict_tmem(zbudpage);
	zbudpage_spin_lock(zbudpage);
	zbudpage_clear_dying(zbudpage);
	page = zbud_unuse_zbudpage(zbudpage, true);
	if (!irqs_disabled)
		local_bh_enable();
out:
	return page;
}
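/*
 * Note: the lists lock is dropped before calling back into tmem,
 * presumably to avoid lock-ordering problems; the "dying" flag is what
 * keeps concurrent zbud operations away from the page meanwhile.
 */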
/*
 * Choose an LRU zbudpage that is evictable (unlocked and not marked
 * unevictable), take it off the normal lists, and mark it zombie while
 * copying out its tmem handle(s) and compressed payload(s) for the
 * caller.  Returns the number of zbuds copied out (0, 1, or 2).
 */
unsigned int zbud_make_zombie_lru(struct tmem_handle *th, unsigned char **data,
				  unsigned int *zsize, bool eph)
{
	struct zbudpage *zbudpage = NULL, *zbudpag2;
	struct tmem_handle *thfrom;
	char *from_va;
	void *zbpg;
	unsigned size;
	int ret = 0, i;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
	struct list_head *lru_list =
		eph ? &zbud_eph_lru_list : &zbud_pers_lru_list;

	spin_lock_bh(lists_lock);
	if (list_empty(lru_list))
		goto out;
	list_for_each_entry_safe(zbudpage, zbudpag2, lru_list, lru) {
		/* skip a locked zbudpage */
		if (unlikely(!zbudpage_spin_trylock(zbudpage)))
			continue;
		/* skip an unevictable zbudpage */
		if (unlikely(zbudpage->unevictable != 0)) {
			zbudpage_spin_unlock(zbudpage);
			continue;
		}
		/* got a locked evictable page */
		goto zombify_page;
	}
	/* no unlocked evictable pages, give up */
	goto out;

zombify_page:
	list_del_init(&zbudpage->budlist);
	zbudpage_set_zombie(zbudpage);
	list_del_init(&zbudpage->lru);
	if (eph) {
		list_add_tail(&zbudpage->lru, &zbud_eph_zombie_list);
		zbud_eph_zombie_count =
				atomic_inc_return(&zbud_eph_zombie_atomic);
	} else {
		list_add_tail(&zbudpage->lru, &zbud_pers_zombie_list);
		zbud_pers_zombie_count =
				atomic_inc_return(&zbud_pers_zombie_atomic);
	}
	zbpg = kmap_zbudpage_atomic(zbudpage);
	for (i = 0; i < 2; i++) {
		size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
		if (size) {
			from_va = zbud_data(zbpg, i, size);
			thfrom = (struct tmem_handle *)from_va;
			from_va += sizeof(struct tmem_handle);
			size -= sizeof(struct tmem_handle);
			if (th != NULL)
				th[ret] = *thfrom;
			if (data != NULL)
				memcpy(data[ret], from_va, size);
			if (zsize != NULL)
				*zsize++ = size;
			ret++;
		}
	}
	kunmap_zbudpage_atomic(zbpg);
	zbudpage_spin_unlock(zbudpage);
out:
	spin_unlock_bh(lists_lock);
	return ret;
}
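/*
 * Note: zombifying reads out the handles and payloads (e.g. so ramster
 * can ship them to a remote node) without freeing the zbuds; the page
 * is reclaimed later when zbud_free_and_delist() sees the zombie flag.
 */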
void zbud_init(void)
{
	int i;

#ifdef CONFIG_DEBUG_FS
	zbud_debugfs_init();
#endif
	BUG_ON(sizeof(struct zbudpage) > sizeof(struct page));
	for (i = 0; i < NCHUNKS; i++) {
		INIT_LIST_HEAD(&zbud_eph_unbuddied[i].list);
		INIT_LIST_HEAD(&zbud_pers_unbuddied[i].list);
	}
}
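/*
 * Note: the BUG_ON above enforces the overlay trick used throughout:
 * all per-pageframe zbud state must fit inside struct page itself, so
 * the allocator needs no metadata allocations of its own.
 */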