53 int i = reg - drm->tile.reg;
61 pfb->tile.fini(pfb, i, tile);
64 pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
66 pfb->tile.prog(pfb, i, tile);
75 nv10_bo_get_tile_region(struct drm_device *dev, int i)
80 spin_lock(&drm->tile.lock);
88 spin_unlock(&drm->tile.lock);
99 spin_lock(&drm->tile.lock);
107 spin_unlock(&drm->tile.lock);
120 for (i = 0; i < pfb->tile.regions; i++) {
121 tile = nv10_bo_get_tile_region(dev, i);
123 if (pitch && !found) {
127 } else if (tile && pfb->tile.region[i].pitch) {
129 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
132 nv10_bo_put_tile_region(dev, tile, NULL);
136 nv10_bo_update_tile_region(dev, found, addr, size,
149 DRM_ERROR("bo %p still attached to GEM object\n", bo);
150 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
156 int *align, int *size)
167 } else if (device->chipset >= 0x30) {
171 } else if (device->chipset >= 0x20) {
175 } else if (device->chipset >= 0x10) {
206 INIT_LIST_HEAD(&nvbo->head);
207 INIT_LIST_HEAD(&nvbo->entry);
211 nvbo->bo.bdev = &drm->ttm.bdev;
214 if (drm->client.base.vm) {
219 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
228 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
245 pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
247 pl[(*n)++] = TTM_PL_FLAG_TT | flags;
249 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
259 if (nv_device(drm->device)->card_type == NV_10 &&
260 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
261 nvbo->bo.mem.num_pages < vram_pages / 4) {
293 set_placement_range(nvbo, type);
303 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
304 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
305 1 << bo->mem.mem_type, memtype);
320 switch (bo->mem.mem_type) {
322 drm->gem.vram_available -= bo->mem.size;
325 drm->gem.gart_available -= bo->mem.size;
356 switch (bo->mem.mem_type) {
358 drm->gem.vram_available += bo->mem.size;
361 drm->gem.gart_available += bo->mem.size;
395 bool no_wait_reserve, bool no_wait_gpu)
400 no_wait_reserve, no_wait_gpu);
411 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
423 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
435 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
447 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
464 return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
465 page_flags, dummy_read);
492 if (nv_device(drm->device)->card_type >= NV_50) {
506 if (nv_device(drm->device)->card_type >= NV_50)
538 switch (bo->mem.mem_type) {
559 bool no_wait_reserve, bool no_wait_gpu,
570 no_wait_reserve, no_wait_gpu, new_mem);
578 int ret = RING_SPACE(chan, 2);
592 int ret = RING_SPACE(chan, 10);
603 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
611 int ret = RING_SPACE(chan, 2);
624 u64 src_offset = node->vma[0].offset;
625 u64 dst_offset = node->vma[1].offset;
631 int line_count = (page_count > 8191) ? 8191 : page_count;
633 ret = RING_SPACE(chan, 11);
649 page_count -= line_count;
662 u64 src_offset = node->vma[0].offset;
663 u64 dst_offset = node->vma[1].offset;
669 int line_count = (page_count > 2047) ? 2047 : page_count;
671 ret = RING_SPACE(chan, 12);
688 page_count -= line_count;
701 u64 src_offset = node->vma[0].offset;
702 u64 dst_offset = node->vma[1].offset;
708 int line_count = (page_count > 8191) ? 8191 : page_count;
710 ret = RING_SPACE(chan, 11);
726 page_count -= line_count;
739 int ret = RING_SPACE(chan, 7);
757 int ret = RING_SPACE(chan, 7);
773 int ret = RING_SPACE(chan, 6);
793 u64 src_offset = node->vma[0].offset;
794 u64 dst_offset = node->vma[1].offset;
800 amount = min(length, (u64)(4 * 1024 * 1024));
802 height = amount / stride;
806 ret = RING_SPACE(chan, 8);
819 ret = RING_SPACE(chan, 2);
828 ret = RING_SPACE(chan, 8);
841 ret = RING_SPACE(chan, 2);
849 ret = RING_SPACE(chan, 14);
869 src_offset += amount;
870 dst_offset += amount;
879 int ret = RING_SPACE(chan, 4);
908 ret = RING_SPACE(chan, 3);
913 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
914 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
918 int line_count = (page_count > 2047) ? 2047 : page_count;
920 ret = RING_SPACE(chan, 11);
937 page_count -= line_count;
968 bool no_wait_reserve, bool no_wait_gpu,
983 if (nv_device(drm->device)->card_type >= NV_50) {
986 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
990 ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
995 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
997 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
999 no_wait_gpu, new_mem);
1010 static const struct {
1019 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1020 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1021 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1022 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1023 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1024 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1025 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1026 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1027 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1029 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1030 }, *mthd = _methods;
1031 const char *name = "CPU";
1037 u32 handle = (mthd->engine << 16) | mthd->oclass;
1039 if (mthd->init == nve0_bo_move_init)
1047 mthd->oclass, NULL, 0, &object);
1049 ret = mthd->init(chan, handle);
1056 drm->ttm.move = mthd->exec;
1060 } while ((++mthd)->exec);
1062 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1067 bool no_wait_reserve, bool no_wait_gpu,
1089 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
1093 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
1101 bool no_wait_reserve, bool no_wait_gpu,
1119 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
1123 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
1139 if (bo->destroy != nouveau_bo_del_ttm)
1175 if (nv_device(drm->device)->card_type >= NV_10) {
1176 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1192 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1193 *old_tile = new_tile;
1198 bool no_wait_reserve, bool no_wait_gpu,
1207 if (nv_device(drm->device)->card_type < NV_50) {
1208 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1222 if (!drm->ttm.move) {
1229 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1231 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1233 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1242 if (nv_device(drm->device)->card_type < NV_50) {
1244 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1246 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1267 mem->bus.offset = 0;
1270 mem->bus.is_iomem = false;
1281 mem->bus.base = drm->agp.base;
1282 mem->bus.is_iomem = true;
1289 mem->bus.is_iomem = true;
1290 if (nv_device(drm->device)->card_type >= NV_50) {
1339 if (bo->mem.start + bo->mem.num_pages < mappable)
1350 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1359 if (ttm->state != tt_unpopulated)
1362 if (slave && ttm->sg) {
1366 ttm->state = tt_unbound;
1370 drm = nouveau_bdev(ttm->bdev);
1375 return ttm_agp_tt_populate(ttm);
1379 #ifdef CONFIG_SWIOTLB
1394 if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
1396 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1408 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1419 drm = nouveau_bdev(ttm->bdev);
1424 ttm_agp_tt_unpopulate(ttm);
1429 #ifdef CONFIG_SWIOTLB
1438 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1454 spin_lock(&nvbo->bo.bdev->fence_lock);
1455 old_fence = nvbo->bo.sync_obj;
1456 nvbo->bo.sync_obj = fence;
1457 spin_unlock(&nvbo->bo.bdev->fence_lock);
1463 nouveau_bo_fence_unref(void **sync_obj)
1469 nouveau_bo_fence_ref(void *sync_obj)
1475 nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
1481 nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
1487 nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
1493 .ttm_tt_create = &nouveau_ttm_tt_create,
1494 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1495 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1496 .invalidate_caches = nouveau_bo_invalidate_caches,
1497 .init_mem_type = nouveau_bo_init_mem_type,
1498 .evict_flags = nouveau_bo_evict_flags,
1499 .move_notify = nouveau_bo_move_ntfy,
1500 .move = nouveau_bo_move,
1501 .verify_access = nouveau_bo_verify_access,
1502 .sync_obj_signaled = nouveau_bo_fence_signalled,
1503 .sync_obj_wait = nouveau_bo_fence_wait,
1504 .sync_obj_flush = nouveau_bo_fence_flush,
1505 .sync_obj_unref = nouveau_bo_fence_unref,
1506 .sync_obj_ref = nouveau_bo_fence_ref,
1507 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1508 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1509 .io_mem_free = &nouveau_ttm_io_mem_free,
1556 spin_lock(&nvbo->bo.bdev->fence_lock);
1558 spin_unlock(&nvbo->bo.bdev->fence_lock);