#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)

/* Pool type flags: one caching state, optionally ORed with DMA32. */
#define IS_UNDEFINED	(0)
#define IS_WC		(1<<1)
#define IS_UC		(1<<2)
#define IS_CACHED	(1<<3)
#define IS_DMA32	(1<<4)
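/*
 * Sizing note (derived from the constants above): NUM_PAGES_TO_ALLOC is
 * however many struct page pointers fit in one page, so with 4 KiB pages
 * and 8-byte pointers the pool works on batches of 512 pages. The IS_*
 * flags combine into a pool type, e.g. write-combined DMA32 memory is
 * IS_WC | IS_DMA32.
 */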
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
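/*
 * These attributes become writable files under the manager's kobject
 * (registered as "dma_pool" in ttm_dma_page_alloc_init() below). Values
 * are exchanged in kilobytes and converted to pages via PAGE_SIZE >> 10;
 * e.g. writing "16384" to pool_max_size caps a pool at 4096 pages on a
 * 4 KiB-page system.
 */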
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);

	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val;
	int chars;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	/* Convert from number of pages to kb */
	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
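/*
 * The kobject owns the manager's lifetime: the final kobject_put()
 * (see ttm_dma_page_alloc_fini() below) lands in
 * ttm_pool_kobj_release(), which frees _manager.
 */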
#ifndef CONFIG_X86
/* Fallbacks for platforms without set_pages_array_*(): flip the caching
 * attribute by (un)mapping the pages through AGP. */
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
	return 0;
}
#endif /* !CONFIG_X86 */
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}
/* Counterpart of __ttm_dma_alloc_page() (not shown), which obtains the
 * page via dma_alloc_coherent(). */
static void __ttm_dma_free_page(struct dma_pool *pool,
				struct dma_page *d_page)
{
	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, d_page->dma);
	kfree(d_page);
}
/* Caller must hold pool->lock. */
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on pages if pool isn't cached. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on pages if pool isn't cached. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}
/*
 * Free (and remove from the pool's free_list) up to nr_free pages;
 * returns how many of the requested pages could not be freed.
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (nr_free > 1)
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
				GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list. */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			ttm_pool_update_free_locked(pool, freed_pages);
			/* Because changing page caching is costly we unlock
			 * the pool to prevent stalling. */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* Restart if there is anything left to free. */
			if (nr_free)
				goto restart;

			/* Can't fall through or break: the loop body runs
			 * under the spinlock while we are outside it here. */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
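/*
 * Callers either hand this function a budget (the shrinker passes its
 * nr_to_scan) or drain the pool completely, as the teardown path does:
 *
 *	ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
 *
 * The pool lock is dropped around ttm_dma_pages_put() above because the
 * write-back transition is slow and must not stall concurrent
 * allocations.
 */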
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;

		/* OK to drain here: the global mutex keeps anyone from
		 * adding to this pool concurrently. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
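/*
 * Pool lifetime is tied to the struct device through devres. A minimal
 * sketch of the registration side (this is assumed to sit in
 * ttm_dma_pool_init(); error handling omitted):
 *
 *	struct dma_pool **ptr;
 *
 *	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
 *	*ptr = pool;
 *	devres_add(dev, ptr);
 *
 * With that in place ttm_dma_pool_release() runs automatically when the
 * device is torn down, and ttm_dma_page_alloc_fini() can drop the
 * resource early via devres_destroy() using ttm_dma_pool_match().
 */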
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL;
	char *p;
	unsigned i;

	/* (allocation of pool, sec_pool and the devres handle elided) */

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;

	/* Compose the pool name from the flags that are set. */
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i])
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
	}

	/* (registration on dev->dma_pools and _manager->pools elided) */
	return pool;
}
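/*
 * Worked example of the name composition above: for
 * type == IS_WC | IS_DMA32 the loop matches n[0] ("wc") and n[3]
 * (" dma32", leading space intended), producing the pool name
 * "wc dma32".
 */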
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}
/*
 * If a caching transition failed, scan d_pages for the pages whose
 * attribute could not be changed and release them instead of pooling.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. remove it from the list and free it */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i == cpages)
			break;
		p = failed_pages[i];
	}
}
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never be dma32, so we
		 * should be fine in such a case */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}
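/*
 * The caching transition is the expensive step here, so freshly
 * allocated pages are staged in caching_array and converted at most
 * max_cpages at a time, amortizing the cost of the attribute change
 * over a whole batch instead of paying it per page.
 */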
/*
 * Top the pool up to options.small pages if needed; returns how many
 * pages are then available on the free list. Called with the pool lock
 * held; drops and retakes it around the (sleeping) allocation.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end of the free list. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list)
				cpages++;
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}
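/*
 * Note the locking dance above: the pool lock is dropped across
 * ttm_dma_pool_alloc_new_pages() because the coherent DMA allocations
 * can sleep, then retaken before the fresh pages are spliced onto
 * free_list; *irq_flags belongs to the caller's spin_lock_irqsave().
 */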
/* Move one page from the pool's free list into the ttm at @index. */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list,
					  struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}
/*
 * On success the ttm's pages array holds num_pages correctly cached,
 * DMA-mapped pages.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	gfp_t gfp_flags;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	/* ttm_to_type() (not shown) maps page flags + caching state to
	 * the matching IS_* pool type. */
	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	/* (swapped-tt handling elided) */
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
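/*
 * A minimal sketch of how a driver backend would hook this up (driver
 * names here are hypothetical; the real wiring varies per driver):
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *gtt = container_of(ttm,
 *					struct mydrv_ttm_tt, ttm_dma.ttm);
 *
 *		return ttm_dma_populate(&gtt->ttm_dma, gtt->dev);
 *	}
 *
 * with the matching unpopulate hook calling ttm_dma_unpopulate().
 */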
/* Get a good estimate of how many pages are free across all pools. */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}
/* Put all pages on pages_list back into the correct pool for reuse. */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* Make sure the pages array matches the list and count the pages. */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list)
		ttm->pages[count++] = d_page->p;

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages to reduce
			 * calls to set_pages_array_wb() */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink the pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
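/*
 * Two return paths above: cached pools hand every page straight back
 * via ttm_dma_page_put(), since there is no caching transition worth
 * amortizing. Uncached/WC pools keep up to options.max_size pages on
 * free_list for reuse and trim any excess in chunks of at least
 * NUM_PAGES_TO_ALLOC to limit set_pages_array_wb() calls.
 */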
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return an estimate of the number of unused pages in all pools */
	return ttm_dma_pool_get_num_unused_pages();
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
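/*
 * This matches the old single-callback shrinker interface: the core
 * calls the .shrink hook both to query (sc->nr_to_scan == 0, in which
 * case the scan loop exits immediately and only the free-page estimate
 * is returned) and to actually reclaim pages.
 */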
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager. */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
			   pool->npages_free,
			   pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}