34 #define pr_fmt(fmt) "[TTM] " fmt
36 #include <linux/list.h>
40 #include <linux/module.h>
43 #include <linux/slab.h>
/*
 * Pool-allocator tuning constants.
 * NOTE(review): this is a fragmentary excerpt — intervening original
 * lines are elided throughout this file.
 */
/* Number of struct page pointers that fit in one page-sized array. */
55 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
/* NOTE(review): threshold for "small" requests served from the pool
 * (compared against 'count' in ttm_page_pool_fill_locked) — confirm. */
56 #define SMALL_ALLOCATION 16
/* Sentinel page count: ~0U means "free every page in the pool". */
57 #define FREE_ALL_PAGES (~0U)
/* NOTE(review): presumably a pacing interval (ms?) for freeing pages
 * back to the system — usage not visible in this excerpt; confirm. */
59 #define PAGE_FREE_INTERVAL 1000
/*
 * sysfs attributes exposing the pool allocator's tunables
 * (max size, small-allocation threshold, allocation batch size).
 * NOTE(review): the initializers are truncated in this excerpt —
 * the .mode fields and closing braces are elided.
 */
126 static struct attribute ttm_page_pool_max = {
127 .name =
"pool_max_size",
130 static struct attribute ttm_page_pool_small = {
131 .name =
"pool_small_allocation",
134 static struct attribute ttm_page_pool_alloc_size = {
135 .name =
"pool_allocation_size",
/* Attribute table handed to the kobj_type below.
 * NOTE(review): &ttm_page_pool_max (original line 140) and the NULL
 * terminator are elided from this excerpt. */
139 static struct attribute *ttm_pool_attrs[] = {
141 &ttm_page_pool_small,
142 &ttm_page_pool_alloc_size,
/*
 * kobject release callback for the pool manager.
 * NOTE(review): body elided in this excerpt.
 */
146 static void ttm_pool_kobj_release(
struct kobject *kobj)
/*
 * ttm_pool_store() fragment: parses an unsigned value from the sysfs
 * buffer and dispatches on which attribute was written.
 */
160 chars =
sscanf(buffer,
"%u", &val);
/* Route the parsed value to the matching tunable. */
167 if (attr == &ttm_page_pool_max)
169 else if (attr == &ttm_page_pool_small)
171 else if (attr == &ttm_page_pool_alloc_size) {
/* Reject out-of-range allocation sizes outright... */
173 pr_err(
"Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
/* ...but only warn for merely-too-large ones. */
178 pr_warn(
"Setting allocation size to larger than %lu is not recommended\n",
/*
 * ttm_pool_show() fragment: mirrors the store dispatch to report the
 * current value of each tunable. Bodies elided in this excerpt.
 */
194 if (attr == &ttm_page_pool_max)
196 else if (attr == &ttm_page_pool_small)
198 else if (attr == &ttm_page_pool_alloc_size)
/*
 * sysfs glue: wire the show/store callbacks and attribute table into a
 * kobj_type for the pool manager's kobject.
 * NOTE(review): closing braces of both initializers are elided here.
 */
206 static const struct sysfs_ops ttm_pool_sysfs_ops = {
207 .show = &ttm_pool_show,
208 .store = &ttm_pool_store,
211 static struct kobj_type ttm_pool_kobj_type = {
212 .release = &ttm_pool_kobj_release,
213 .sysfs_ops = &ttm_pool_sysfs_ops,
/* NOTE(review): .default_attrs was removed from kobj_type in later
 * kernels (replaced by default_groups) — this dates the excerpt. */
214 .default_attrs = ttm_pool_attrs,
/*
 * Three parallel loops over an array of addresses — presumably the
 * per-page fallback implementations of set_pages_array_wb/uc/wc for
 * configs without the batched arch helpers. Loop bodies elided;
 * confirm against the original file.
 */
225 for (i = 0; i < addrinarray; i++)
236 for (i = 0; i < addrinarray; i++)
247 for (i = 0; i < addrinarray; i++)
/*
 * ttm_pages_put() — return an array of pages to the system.
 * Visible here: it logs an error if switching the pages back to
 * write-back caching fails; the actual put loop is elided.
 */
276 static void ttm_pages_put(
struct page *pages[],
unsigned npages)
280 pr_err(
"Failed to set %d pages to wb!\n", npages);
/*
 * ttm_pool_update_free_locked() fragment — bookkeeping after freeing:
 * shrink the pool's page count and bump the cumulative free counter.
 * Caller is expected to hold pool->lock (per the _locked suffix —
 * the lock acquisition is not visible in this excerpt).
 */
286 unsigned freed_pages)
288 pool->
npages -= freed_pages;
289 pool->
nfrees += freed_pages;
/*
 * ttm_page_pool_free() fragments — free up to nr_free pages from one
 * pool. Visible shape: allocate a temporary pages_to_free[] batch
 * array, unlink pages from the pool list under pool->lock (irqsave),
 * update counters, drop the lock, hand the batch to ttm_pages_put(),
 * and loop until nr_free is satisfied. Many lines elided.
 */
303 unsigned long irq_flags;
305 struct page **pages_to_free;
306 unsigned freed_pages = 0,
307 npages_to_free = nr_free;
/* Temporary array for the current batch of pages to release.
 * NOTE(review): later kernels cap this at NUM_PAGES_TO_ALLOC and use
 * kmalloc_array to avoid overflow — worth checking here. */
312 pages_to_free =
kmalloc(npages_to_free *
sizeof(
struct page *),
314 if (!pages_to_free) {
315 pr_err(
"Failed to allocate memory for pool free operation\n");
/* Stop collecting once the batch is full. */
323 if (freed_pages >= npages_to_free)
326 pages_to_free[freed_pages++] =
p;
/* Unlink the collected run from the pool list in one splice. */
330 __list_del(p->
lru.prev, &pool->
list);
332 ttm_pool_update_free_locked(pool, freed_pages);
/* Release the pages outside the spinlock — ttm_pages_put may sleep. */
337 spin_unlock_irqrestore(&pool->
lock, irq_flags);
339 ttm_pages_put(pages_to_free, freed_pages);
341 nr_free -= freed_pages;
344 npages_to_free = nr_free;
/* Tail path: remove any final partial batch, then free it too. */
365 __list_del(&p->
lru, &pool->
list);
367 ttm_pool_update_free_locked(pool, freed_pages);
368 nr_free -= freed_pages;
371 spin_unlock_irqrestore(&pool->
lock, irq_flags);
374 ttm_pages_put(pages_to_free, freed_pages);
376 kfree(pages_to_free);
/*
 * Shrinker support fragments.
 * ttm_pool_get_num_unused_pages(): sums npages across all pools in
 * the global _manager (loop header elided).
 */
381 static int ttm_pool_get_num_unused_pages(
void)
386 total += _manager->
pools[i].npages;
/*
 * ttm_pool_mm_shrink() fragment: round-robins over the pools
 * (pool_offset) and asks each to free pages, reporting the remaining
 * unused-page count back to the MM shrinker core.
 */
406 unsigned nr_free = shrink_pages;
407 if (shrink_pages == 0)
409 pool = &_manager->
pools[(i + pool_offset)%NUM_POOLS];
410 shrink_pages = ttm_page_pool_free(pool, nr_free);
413 return ttm_pool_get_num_unused_pages();
/* ttm_pool_mm_shrink_init() fragment: install the shrink callback.
 * NOTE(review): the single-callback .shrink interface is the legacy
 * shrinker API (pre count_objects/scan_objects split). */
418 manager->
mm_shrink.shrink = &ttm_pool_mm_shrink;
/*
 * ttm_set_pages_caching() fragments — switch an array of pages to the
 * requested caching state; visible here are the error paths for the
 * uncached and write-combined conversions.
 */
428 static int ttm_set_pages_caching(
struct page **pages,
437 pr_err(
"Failed to set %d pages to uc!\n", cpages);
442 pr_err(
"Failed to set %d pages to wc!\n", cpages);
/*
 * ttm_handle_caching_state_failure() fragments — on caching-change
 * failure, walk the cpages entries of failed_pages and (presumably)
 * unlink and free them so bad-state pages never enter the pool.
 * Loop body elided — confirm.
 */
455 static void ttm_handle_caching_state_failure(
struct list_head *pages,
457 struct page **failed_pages,
unsigned cpages)
461 for (i = 0; i < cpages; ++
i) {
/*
 * ttm_alloc_new_pages() fragments — allocate 'count' fresh pages,
 * batching caching-state changes through caching_array (at most
 * max_cpages entries at a time) and appending each page to 'pages'.
 * Many lines elided, including the alloc_page call itself.
 */
476 struct page **caching_array;
480 unsigned max_cpages =
min(count,
486 if (!caching_array) {
487 pr_err(
"Unable to allocate table for new pages\n");
/* Main allocation loop. */
491 for (i = 0, cpages = 0; i <
count; ++
i) {
/* Page allocation failed: flush the pending caching batch, then
 * unwind via the failure handler. */
495 pr_err(
"Unable to get page %u\n", i);
500 r = ttm_set_pages_caching(caching_array,
503 ttm_handle_caching_state_failure(pages,
505 caching_array, cpages);
/* NOTE(review): highmem pages are presumably excluded from the
 * caching-change batch — the conditional body is elided. */
511 #ifdef CONFIG_HIGHMEM
518 caching_array[cpages++] =
p;
/* Batch full: apply the caching change now. */
519 if (cpages == max_cpages) {
521 r = ttm_set_pages_caching(caching_array,
524 ttm_handle_caching_state_failure(pages,
526 caching_array, cpages);
533 list_add(&p->
lru, pages);
/* Flush any final partial batch before returning. */
537 r = ttm_set_pages_caching(caching_array, cstate, cpages);
539 ttm_handle_caching_state_failure(pages,
541 caching_array, cpages);
544 kfree(caching_array);
/*
 * ttm_page_pool_fill_locked() fragments — top up a pool when a small
 * request would drain it. Called with pool->lock held (taken via
 * *irq_flags); drops the lock around the allocation and retakes it
 * (retake not visible in this excerpt — confirm).
 */
553 static void ttm_page_pool_fill_locked(
struct ttm_page_pool *pool,
555 unsigned long *irq_flags)
/* Only refill for small requests that exceed the current stock. */
572 if (count < _manager->
options.small
573 && count > pool->
npages) {
575 unsigned alloc_size = _manager->
options.alloc_size;
/* Allocation can sleep: release the spinlock first. */
581 spin_unlock_irqrestore(&pool->
lock, *irq_flags);
583 INIT_LIST_HEAD(&new_pages);
584 r = ttm_alloc_new_pages(&new_pages, pool->
gfp_flags, ttm_flags,
/* Success path: splice the new pages in and grow the count. */
589 list_splice(&new_pages, &pool->
list);
591 pool->
npages += alloc_size;
593 pr_err(
"Failed to fill pool (%p)\n", pool);
/* Failure path: still splice whatever partial set was produced. */
598 list_splice(&new_pages, &pool->
list);
/*
 * ttm_page_pool_get_pages() fragments — take up to 'count' pages from
 * the pool onto 'pages', refilling first via
 * ttm_page_pool_fill_locked(). Returns the number of pages the caller
 * still needs to allocate elsewhere (per the callers visible below).
 */
611 static unsigned ttm_page_pool_get_pages(
struct ttm_page_pool *pool,
617 unsigned long irq_flags;
622 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
/* Request >= whole stock: hand over the entire list. */
624 if (count >= pool->
npages) {
626 list_splice_init(&pool->
list, pages);
/* NOTE(review): branch picks the cheaper end of the list to walk
 * when cutting out 'count' entries — bodies elided. */
633 if (count <= pool->npages/2) {
647 list_cut_position(pages, &pool->
list, p);
651 spin_unlock_irqrestore(&pool->
lock, irq_flags);
/*
 * ttm_put_pages() fragments — return npages pages to the matching
 * pool (or straight to the system; the no-pool branch is elided).
 * Both visible loops sanity-check the refcount before reinsertion:
 * a count != 1 means someone else still holds the page, so it is
 * deliberately leaked rather than recycled.
 */
656 static void ttm_put_pages(
struct page **pages,
unsigned npages,
int flags,
659 unsigned long irq_flags;
665 for (i = 0; i <
npages; i++) {
667 if (page_count(pages[i]) != 1)
668 pr_err(
"Erroneous page count. Leaking pages.\n");
677 for (i = 0; i <
npages; i++) {
679 if (page_count(pages[i]) != 1)
680 pr_err(
"Erroneous page count. Leaking pages.\n");
695 spin_unlock_irqrestore(&pool->
lock, irq_flags);
/* Trim the pool back down after the insertions.
 * NOTE(review): the argument choice (npages here) is worth comparing
 * with the original; later kernels pass a computed surplus. */
697 ttm_page_pool_free(pool, npages);
/*
 * ttm_get_pages() fragments — main allocation entry: serve npages
 * from the pool, falling back to direct page allocation. The no-pool
 * fast path allocates each page individually; the pool path takes
 * what the pool has and tops up the remainder with
 * ttm_alloc_new_pages().
 */
704 static int ttm_get_pages(
struct page **pages,
unsigned npages,
int flags,
/* No-pool path: allocate page by page. */
725 for (r = 0; r < npages; ++
r) {
729 pr_err(
"Unable to allocate page\n");
/* Pool path: npages becomes the count still outstanding. */
742 INIT_LIST_HEAD(&plist);
743 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
/* NOTE(review): zeroing of pool pages on TTM_PAGE_FLAG_ZERO_ALLOC —
 * branch body elided. */
750 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
/* Allocate whatever the pool could not supply. */
764 INIT_LIST_HEAD(&plist);
765 r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
/* On failure, release everything gathered so far before erroring. */
772 pr_err(
"Failed to allocate extra pages for large request\n");
773 ttm_put_pages(pages, count, flags, cstate);
/*
 * ttm_page_pool_init_locked() fragment — initialize one pool's list
 * head; the remaining field initialization (lock, flags, counters)
 * is elided in this excerpt.
 */
781 static void ttm_page_pool_init_locked(
struct ttm_page_pool *pool,
int flags,
786 INIT_LIST_HEAD(&pool->
list);
/*
 * ttm_page_alloc_init() fragments — allocate the global _manager,
 * record the max-size option, register its kobject under the global
 * TTM kobject as "pool", and hook into the MM shrinker.
 */
798 pr_info(
"Initializing pool allocator\n");
800 _manager = kzalloc(
sizeof(*_manager),
GFP_KERNEL);
812 _manager->
options.max_size = max_pages;
/* kobject_init_and_add(..., &glob->kobj, "pool") — call head elided. */
817 &glob->
kobj,
"pool");
824 ttm_pool_mm_shrink_init(_manager);
/*
 * ttm_page_alloc_fini() fragments — unregister the shrinker; the
 * pool draining and kobject teardown are elided.
 */
833 pr_info(
"Finalizing pool allocator\n");
834 ttm_pool_mm_shrink_fini(_manager);
/*
 * ttm_pool_populate() fragments — fill a ttm_tt's page array one page
 * at a time via ttm_get_pages(), then mark the TT bound-ready.
 * Early-out when already populated.
 */
849 if (ttm->
state != tt_unpopulated)
853 ret = ttm_get_pages(&ttm->
pages[i], 1,
877 ttm->
state = tt_unbound;
/*
 * ttm_pool_unpopulate() fragments — mirror image: return each page
 * with ttm_put_pages() and reset the TT state.
 */
890 ttm_put_pages(&ttm->
pages[i], 1,
895 ttm->
state = tt_unpopulated;
/*
 * ttm_page_alloc_debugfs() fragments — dump per-pool statistics to a
 * seq_file: a header row (h[]) then one line per pool (elided).
 * Prints a notice instead when no allocator is running.
 */
903 char *
h[] = {
"pool",
"refills",
"pages freed",
"size"};
905 seq_printf(m,
"No pool allocator running.\n");
909 h[0], h[1], h[2], h[3]);