#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static void ttm_bo_global_kobj_release(struct kobject *kobj);

        if (flags & (1 << i)) {
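/*
 * ttm_mem_type_debug() dumps the state of a single memory type manager;
 * ttm_bo_mem_space_debug() uses it when a requested placement cannot be
 * satisfied, as in the "No space for" path just below.
 */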
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)

        pr_err("No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,

                ret = ttm_mem_type_from_flags(placement->placement[i],

                pr_err(" placement[%d]=0x%08X (%d)\n",

                ttm_mem_type_debug(bo->bdev, mem_type);
static struct attribute *ttm_bo_global_attrs[] = {

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
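/*
 * ttm_bo_release_list() is the release callback for the bo's list_kref:
 * it destroys the TTM, returns the accounted acc_size to the memory global
 * and frees the buffer object (or calls its destroy hook).
 */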
static void ttm_bo_release_list(struct kref *list_kref)

        man = &bdev->man[bo->mem.mem_type];

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);

        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
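/*
 * ttm_bo_ref_bug() is used as the kref release callback on paths where the
 * list reference must never actually drop to zero; reaching it is a bug.
 */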
static void ttm_bo_ref_bug(struct kref *list_kref)

                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
                bo->ttm->sg = bo->sg;

                pr_err("Illegal buffer object type\n");

                                  bool evict, bool interruptible,
                                  bool no_wait_reserve, bool no_wait_gpu)

        if (old_is_pci || new_is_pci ||

                        ret = ttm_bo_add_ttm(bo, zero);
        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, mem);

                if (bdev->driver->move_notify) {

                        bdev->driver->move_notify(bo, mem);

                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);

                        pr_err("Can not flush read caches\n");

        if (bo->mem.mm_node) {

                    bdev->man[bo->mem.mem_type].gpu_offset;

        new_man = &bdev->man[bo->mem.mem_type];

        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);
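/*
 * ttm_bo_cleanup_refs_or_queue(): if the bo is idle and can be reserved
 * without blocking, release its memory type resources right away; otherwise
 * put it on bdev->ddestroy and schedule the delayed-delete work below.
 */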
        void *sync_obj = NULL;

                ttm_bo_cleanup_memtype_use(bo);

                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
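/*
 * ttm_bo_cleanup_refs() finishes destruction of a bo that sits on the
 * delayed-destroy list: wait for the GPU (unless no_wait_gpu), re-reserve
 * the object and finally release its memory type resources.
 */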
                               bool no_wait_reserve,

        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

        if (likely(!no_wait_reserve))

        ttm_bo_cleanup_memtype_use(bo);
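/*
 * ttm_bo_delayed_delete() walks bdev->ddestroy and cleans up entries whose
 * fences have signaled; with remove_all it waits for every entry. A nonzero
 * return means entries are still pending.
 */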
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)

                        ret = ttm_bo_cleanup_refs(entry, false, !remove_all,

                kref_put(&entry->list_kref, ttm_bo_release_list);

        kref_put(&entry->list_kref, ttm_bo_release_list);

        if (ttm_bo_delayed_delete(bdev, false)) {

                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
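/*
 * ttm_bo_release() runs when the last ttm_bo_reference() is dropped: it
 * removes the bo's mmap offset node, hands the object to
 * ttm_bo_cleanup_refs_or_queue() and drops the list reference.
 */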
static void ttm_bo_release(struct kref *kref)

        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);

        kref_put(&bo->kref, ttm_bo_release);

                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
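/*
 * ttm_bo_evict(): wait for the bo to idle, ask the driver for an eviction
 * placement through evict_flags(), find space for it and move the buffer.
 */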
                        bool no_wait_reserve, bool no_wait_gpu)

        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

                        pr_err("Failed to expire sync object before buffer eviction\n");

        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        bdev->driver->evict_flags(bo, &placement);

                                no_wait_reserve, no_wait_gpu);

                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",

                        ttm_bo_mem_space_debug(bo, &placement);

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_reserve, no_wait_gpu);

                        pr_err("Buffer eviction failed\n");
                                bool interruptible, bool no_wait_reserve,

        int ret, put_count = 0;

        if (list_empty(&man->lru)) {

                ret = ttm_bo_cleanup_refs(bo, interruptible,
                                          no_wait_reserve, no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);

        if (likely(!no_wait_reserve))

                kref_put(&bo->list_kref, ttm_bo_release_list);

        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);

        kref_put(&bo->list_kref, ttm_bo_release_list);

                (*man->func->put_node)(man, mem);

                                  bool no_wait_reserve,

                ret = (*man->func->get_node)(man, bo, placement, mem);

                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                          no_wait_reserve, no_wait_gpu);

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);

        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        *masked_placement = cur_flags;
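/*
 * ttm_bo_mem_space() tries each requested placement in order against the
 * compatible memory type managers; if none has free space it falls back to
 * the busy placements and evicts through ttm_bo_mem_force_space().
 */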
                        bool interruptible, bool no_wait_reserve,

        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;

                ret = ttm_mem_type_from_flags(placement->placement[i],

                type_ok = ttm_bo_mt_compatible(man,

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,

                ttm_flag_masked(&cur_flags, placement->placement[i],

                        ret = (*man->func->get_node)(man, bo, placement, mem);

                if (!ttm_bo_mt_compatible(man,

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                             interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {

                        has_erestartsys = true;
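/*
 * ttm_bo_move_buffer(): wait for idle, find space for the new placement
 * with ttm_bo_mem_space() and move the data with ttm_bo_handle_move_mem().
 */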
                              bool interruptible, bool no_wait_reserve,

        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;

        ret = ttm_bo_mem_space(bo, placement, &mem,
                               interruptible, no_wait_reserve, no_wait_gpu);

        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
                                     no_wait_reserve, no_wait_gpu);
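/*
 * ttm_bo_mem_compat() checks whether the current memory region already
 * satisfies one of the requested placements and returns the matching
 * placement index, or -1 if none matches.
 */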
static int ttm_bo_mem_compat(struct ttm_placement *placement,

                                        TTM_PL_MASK_CACHING) &&
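/*
 * ttm_bo_validate(): sanity-check the requested pfn range, move the buffer
 * if its current placement is not compatible, update the caching flags and
 * make sure a TTM exists for system placement.
 */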
                        bool interruptible, bool no_wait_reserve,

        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||

        ret = ttm_bo_mem_compat(placement, &bo->mem);

                ret = ttm_bo_move_buffer(bo, placement, interruptible,
                                         no_wait_reserve, no_wait_gpu);

                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],

                ret = ttm_bo_add_ttm(bo, true);

               (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
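/*
 * ttm_bo_init() sets up the refcounts, list heads and the initial
 * system-memory placement, creates the mmap offset node for mappable
 * objects and then validates the bo into the requested placement.
 */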
                unsigned long buffer_start,

                struct file *persistent_swap_storage,

                pr_err("Out of kernel memory\n");

        if (num_pages == 0) {
                pr_err("Illegal buffer object size\n");

        kref_init(&bo->kref);

        INIT_LIST_HEAD(&bo->lru);

        INIT_LIST_HEAD(&bo->swap);

        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;

                ret = ttm_bo_setup_vm(bo);

                           unsigned long bo_size,
                           unsigned struct_size)

                                 unsigned long bo_size,
                                 unsigned struct_size)

                unsigned long buffer_start,

                struct file *persistent_swap_storage,

        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL, NULL);
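/*
 * ttm_bo_force_list_clean() evicts everything on a memory type's LRU list;
 * it is used when a manager is taken down or its memory evicted wholesale.
 */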
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   unsigned mem_type, bool allow_errors)

        while (!list_empty(&man->lru)) {

                ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);

                                pr_err("Cleanup eviction failed\n");

                pr_err("Illegal memory type %d\n", mem_type);

                pr_err("Trying to take down uninitialized memory manager type %u\n",

        ttm_bo_force_list_clean(bdev, mem_type, false);

        ret = (*man->func->takedown)(man);

                pr_err("Illegal memory manager memory type %u\n", mem_type);

                pr_err("Memory type %u has not been initialized\n", mem_type);

        return ttm_bo_force_list_clean(bdev, mem_type, true);
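/*
 * ttm_bo_init_mm(): let the driver describe the memory type through
 * init_mem_type(), initialize the range manager for non-system types and
 * mark the manager as available for use.
 */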
                        unsigned long p_size)

        ret = bdev->driver->init_mem_type(bdev, type, man);

                ret = (*man->func->init)(man, p_size);

        INIT_LIST_HEAD(&man->lru);
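/* Sysfs release callback for the global TTM buffer-object state. */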
static void ttm_bo_global_kobj_release(struct kobject *kobj)

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);

                pr_err("Could not register buffer object swapout\n");

                man = &bdev->man[i];

                        pr_err("DRM memory manager type %d is not clean\n",

        while (ttm_bo_delayed_delete(bdev, true))

        TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))

                goto out_no_addr_mm;
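/*
 * mmap offset handling: each mappable bo owns a drm_mm node in
 * bdev->addr_space_mm and an entry in the address-space rb tree keyed by
 * that node's start, so faults can look the object up by offset.
 */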
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        unsigned long cur_offset;

                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)

                else if (offset > cur_offset)

        rb_link_node(&bo->vm_rb, parent, cur);

                                         bo->mem.num_pages, 0, 0);

                                       bo->mem.num_pages, 0);

        ttm_bo_vm_insert_rb(bo);

                bool lazy, bool interruptible, bool no_wait)

                                        lazy, interruptible);
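/*
 * ttm_bo_swapout() picks a reservable buffer from the global swap LRU,
 * moves it to cached system memory if necessary and swaps its pages out to
 * the persistent swap storage.
 */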
        while (ret == -EBUSY) {

                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                kref_put(&bo->list_kref, ttm_bo_release_list);

        spin_lock(&bo->bdev->fence_lock);

        spin_unlock(&bo->bdev->fence_lock);

        if ((bo->mem.placement & swap_placement) != swap_placement) {

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false, false);

        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);

        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)