71 &rdev->gart.table_addr);
109 (void *)rdev->gart.ptr,
110 rdev->gart.table_addr);
112 rdev->gart.table_addr = 0;
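
The three fragments above free the CPU-visible GART table through the coherent-DMA API and then clear the stored bus address, which only works because the CPU pointer and the DMA handle returned by the original allocation were kept together. A minimal sketch of that pairing in plain C, with free() standing in for pci_free_consistent() and every name below invented for illustration:

#include <stdint.h>
#include <stdlib.h>

struct gart_table {
	void     *ptr;        /* CPU address of the table */
	uint64_t  table_addr; /* bus/DMA address of the same memory */
	size_t    table_size;
};

static void table_free(struct gart_table *t)
{
	if (!t->ptr)
		return;
	/* driver: pci_free_consistent(pdev, table_size, ptr, table_addr) */
	free(t->ptr);
	t->ptr = NULL;
	t->table_addr = 0;
}

int main(void)
{
	struct gart_table t = { malloc(4096), 0xd0000000u, 4096 };
	table_free(&t); /* afterwards: t.ptr == NULL, t.table_addr == 0 */
	return 0;
}
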
161 radeon_bo_unreserve(rdev->gart.robj);
167 radeon_bo_unreserve(rdev->gart.robj);
168 rdev->gart.table_addr = gpu_addr;
191 radeon_bo_unreserve(rdev->gart.robj);
235 if (!rdev->gart.ready) {
236 WARN(1, "trying to unbind memory from uninitialized GART !\n");
241 for (i = 0; i < pages; i++, p++) {
242 if (rdev->gart.pages[p]) {
245 page_base = rdev->gart.pages_addr[p];
247 if (rdev->gart.ptr) {
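
A compilable model of the bookkeeping the unbind fragments imply: bail out on an uninitialized GART, then for each bound slot drop the page pointer, repoint the DMA address, and rewrite the PTE when the table is CPU-mapped. The driver parks unbound slots on a dummy page; everything below (the struct, field names, the flat one-entry-per-slot table) is an illustrative assumption, not the driver's types:

#include <stddef.h>
#include <stdint.h>

#define NPAGES 16

struct gart_model {
	int       ready;
	void     *pages[NPAGES];      /* CPU page backing each slot, or NULL */
	uint64_t  pages_addr[NPAGES]; /* DMA address for each slot */
	uint64_t *ptr;                /* CPU mapping of the GPU table, may be NULL */
	uint64_t  dummy_addr;         /* safe address unbound slots point at */
};

static void model_unbind(struct gart_model *g, unsigned offset, unsigned pages)
{
	unsigned i, p;

	if (!g->ready)
		return; /* the driver WARNs here instead */
	for (i = 0, p = offset; i < pages && p < NPAGES; i++, p++) {
		if (g->pages[p]) {
			g->pages[p] = NULL;
			g->pages_addr[p] = g->dummy_addr;
			if (g->ptr) /* table is CPU-visible: fix the PTE too */
				g->ptr[p] = g->dummy_addr;
		}
	}
}

int main(void)
{
	static uint64_t table[NPAGES];
	struct gart_model g = { .ready = 1, .ptr = table, .dummy_addr = 0xdead0000u };

	g.pages[3] = table;        /* pretend slot 3 is bound */
	g.pages_addr[3] = 0x100000;
	model_unbind(&g, 3, 1);    /* slot 3 now points at the dummy page */
	return 0;
}
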
279 if (!rdev->gart.ready) {
280 WARN(1, "trying to bind memory to uninitialized GART !\n");
286 for (i = 0; i < pages; i++, p++) {
287 rdev->gart.pages_addr[p] = dma_addr[i];
288 rdev->gart.pages[p] = pagelist[i];
289 if (rdev->gart.ptr) {
290 page_base = rdev->gart.pages_addr[p];
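
The bind path records both arrays and, when the table is kmapped, writes entries starting from page_base. Because one CPU page may span several GPU pages (which is why line 346 below rejects PAGE_SIZE < RADEON_GPU_PAGE_SIZE), each bound page fans out into PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive PTEs. A standalone sketch of that fanout, with both page sizes assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE     16384u /* assumed CPU page size */
#define MODEL_GPU_PAGE_SIZE  4096u /* assumed GPU page size */

int main(void)
{
	uint64_t page_base = 0x100000; /* pages_addr[p] of one bound CPU page */
	unsigned sub;

	/* one PTE per GPU page covered by this CPU page: four here */
	for (sub = 0; sub < MODEL_PAGE_SIZE / MODEL_GPU_PAGE_SIZE; sub++) {
		printf("gpu pte <- %#llx\n", (unsigned long long)page_base);
		page_base += MODEL_GPU_PAGE_SIZE;
	}
	return 0;
}
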
315 if (!rdev->gart.ptr) {
318 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
319 page_base = rdev->gart.pages_addr[i];
341 if (rdev->gart.pages) {
346 DRM_ERROR("Page size is smaller than GPU page size!\n");
355 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
356 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
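
Assuming both counts are simply the GTT aperture divided by the respective page size (consistent with the DRM_INFO format above), the arithmetic is easy to check by hand, e.g. for a 512 MiB aperture:

#include <stdio.h>

int main(void)
{
	unsigned long long gtt = 512ULL << 20; /* 512 MiB aperture (assumed) */
	unsigned long cpu_page = 4096, gpu_page = 4096;

	/* prints: GART: num cpu pages 131072, num gpu pages 131072 */
	printf("GART: num cpu pages %llu, num gpu pages %llu\n",
	       gtt / cpu_page, gtt / gpu_page);
	return 0;
}
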
364 rdev->gart.num_cpu_pages);
365 if (rdev->gart.pages_addr == NULL) {
370 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
385 if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
389 rdev->gart.ready = false;
465 size = radeon_vm_directory_size(rdev);
472 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
494 bo_va->valid = false;
519 list_del_init(&vm->list);
523 bo_va->valid = false;
529 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
554 radeon_vm_free_pt(rdev, vm);
592 radeon_vm_free_pt(rdev, vm_evict);
610 unsigned pd_size, pts_size;
628 r = radeon_vm_evict(rdev, vm);
641 memset(pd_addr, 0, pd_size);
643 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
647 DRM_ERROR("Cannot allocate memory for page table array\n");
667 list_del_init(&vm->list);
687 unsigned choices[2] = {};
707 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
708 best[fence->ring] = fence;
709 choices[fence->ring == ring ? 0 : 1] = i;
713 for (i = 0; i < 2; ++i) {
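
The choices[] fragments suggest a two-bucket pick: track the earliest fence per ring, and remember one eviction candidate whose fence lives on the current ring (slot 0, cheap to wait for) and one on a foreign ring (slot 1). A self-contained model with invented types; only the selection logic mirrors lines 707-713:

#include <stdint.h>
#include <stdio.h>

struct fence { unsigned ring; uint64_t seq; };

int main(void)
{
	struct fence fences[] = { {0, 40}, {1, 7}, {0, 12}, {1, 30} };
	unsigned ring = 0;                 /* ring we want to run on */
	const struct fence *best[2] = {0}; /* earliest fence seen per ring */
	unsigned choices[2] = {0};         /* candidate index + 1, 0 = none */
	unsigned i;

	for (i = 0; i < sizeof(fences) / sizeof(fences[0]); i++) {
		const struct fence *f = &fences[i];
		/* lower seq stands in for radeon_fence_is_earlier() */
		if (!best[f->ring] || f->seq < best[f->ring]->seq) {
			best[f->ring] = f;
			choices[f->ring == ring ? 0 : 1] = i + 1;
		}
	}
	for (i = 0; i < 2; ++i)
		printf("choice[%u] = entry %u\n", i, choices[i]);
	return 0;
}
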
766 if (bo_va->vm == vm) {
801 bo_va->valid = false;
803 INIT_LIST_HEAD(&bo_va->bo_list);
804 INIT_LIST_HEAD(&bo_va->vm_list);
842 eoffset = soffset + size;
843 if (soffset >= eoffset) {
849 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
855 eoffset = last_pfn = 0;
867 if (soffset >= last_offset && eoffset <= tmp->soffset) {
871 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
873 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
886 bo_va->valid = false;
887 list_move(&bo_va->vm_list, head);
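
Lines 867 and 871 are the classic half-open interval tests: the new range [soffset, eoffset) can slot in before tmp when it fits between the previous end and tmp->soffset, and it collides with tmp exactly when eoffset > tmp->soffset && soffset < tmp->eoffset. The test in isolation:

#include <assert.h>

/* [s1,e1) and [s2,e2) overlap iff each starts before the other ends */
static int overlaps(unsigned s1, unsigned e1, unsigned s2, unsigned e2)
{
	return e1 > s2 && s1 < e2;
}

int main(void)
{
	assert(!overlaps(0x0000, 0x1000, 0x1000, 0x2000)); /* touching is fine */
	assert( overlaps(0x0800, 0x1800, 0x1000, 0x2000)); /* partial overlap */
	assert( overlaps(0x1200, 0x1400, 0x1000, 0x2000)); /* containment */
	return 0;
}
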
936 uint64_t last_pde = ~0, last_pt = ~0;
945 for (pt_idx = start; pt_idx <= end; ++pt_idx) {
958 r = radeon_vm_evict(rdev, vm);
968 pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
970 if (((last_pde + 8 * count) != pde) ||
971 ((last_pt + incr * count) != pt)) {
975 last_pt, count, incr,
1010 static void radeon_vm_update_ptes(struct radeon_device *rdev,
1017 uint64_t last_pte = ~0, last_dst = ~0;
1025 for (addr = start; addr < end; ) {
1030 if ((addr & ~mask) == (end & ~mask))
1035 pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
1036 pte += (addr & mask) * 8;
1038 if ((last_pte + 8 * count) != pte) {
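
Both the PDE walk (lines 970-971) and the PTE walk (line 1038) use the same run-length batching trick: keep extending the pending run while the next entry lands exactly 8 bytes (one 64-bit entry) past its end, and flush one big write otherwise. A standalone model, with flush() standing in for the asic set_page hook:

#include <stdint.h>
#include <stdio.h>

static void flush(uint64_t first, unsigned count)
{
	if (count)
		printf("write %u entries starting at %#llx\n",
		       count, (unsigned long long)first);
}

int main(void)
{
	uint64_t addrs[] = { 0x1000, 0x1008, 0x1010, 0x4000, 0x4008 };
	uint64_t last = ~0ULL; /* impossible address: forces the first flush path */
	unsigned count = 0, i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		if (last + 8 * count != addrs[i]) {
			flush(last, count); /* run broken: emit what we have */
			last = addrs[i];
			count = 1;
		} else {
			count++;
		}
	}
	flush(last, count);
	/* prints: 3 entries at 0x1000, then 2 entries at 0x4000 */
	return 0;
}
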
1082 unsigned ridx = rdev->asic->vm.pt_ring_index;
1086 unsigned nptes, npdes, ndw;
1095 if (bo_va == NULL) {
1096 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
1101 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
1115 bo_va->valid = true;
1124 bo_va->valid = false;
1138 nptes = radeon_bo_ngpu_pages(bo);
1147 if (RADEON_VM_BLOCK_SIZE > 11)
1149 ndw += (nptes >> 11) * 4;
1159 ndw += (npdes >> 11) * 4;
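
The >> 11 in lines 1149 and 1159 reserves command-stream headroom: apparently one extra packet header (4 dwords) for every 2048 (2^11) entries to be written. Checking the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned nptes = 8192; /* assumed update size */

	/* 8192 entries / 2048 per packet = 4 headers of 4 dwords = 16 */
	printf("extra header dwords: %u\n", (nptes >> 11) * 4);
	return 0;
}
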
1174 r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
1181 addr, bo_va->flags);
1242 bo_va->valid = false;
1259 INIT_LIST_HEAD(&vm->list);
1260 INIT_LIST_HEAD(&vm->va);
1279 radeon_vm_free_pt(rdev, vm);
1282 if (!list_empty(&vm->va)) {
1283 dev_err(rdev->dev, "still active bo inside vm\n");
1286 list_del_init(&bo_va->vm_list);
1289 list_del_init(&bo_va->bo_list);
1290 radeon_bo_unreserve(bo_va->bo);