/* nvc0_vm_map_pgt(): write a two-word page directory entry; word 0
 * appears to point at the large-page table (pgt[1]) and word 1 at the
 * small-page table (pgt[0]), each address stored in 256-byte units
 * (hence the >> 8) with bit 0 as the valid bit; either table may be
 * absent
 */
u32 pde[2] = { 0, 0 };

if (pgt[0])
	pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
if (pgt[1])
	pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

nv_wo32(pgd, (index * 8) + 0, pde[0]);
nv_wo32(pgd, (index * 8) + 4, pde[1]);
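
/* nvc0_vm_addr(): pack a physical address into PTE form; judging by the
 * shifts, the aperture target lands in bits 32:35 and the memory type
 * in bits 36 and up
 */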
phys |= ((u64)target  << 32);
phys |= ((u64)memtype << 36);
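
/* nvc0_vm_map(): map a linear VRAM range; target 0 here appears to
 * select local (video) memory
 */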
phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
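
/* nvc0_vm_map_sg(): map system-memory pages one PTE at a time from a
 * DMA address list, packing each entry with the mapping's target
 * aperture
 */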
u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
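
/* nvc0_vm_unmap(): an unmapped PTE is simply all-zeroes in both
 * 32-bit words
 */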
nv_wo32(pgt, pte + 0, 0x00000000);
nv_wo32(pgt, pte + 4, 0x00000000);
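
/* nvc0_vm_flush(): 0x100c80 looks like a "free flush slots" counter;
 * wait for a free slot, then write the page directory address and
 * flush type to trigger the TLB flush
 */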
if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
	nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
		 nv_rd32(subdev, 0x100c80), type);
}

nv_wr32(subdev, 0x100cb8, addr >> 8);
nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
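
/* presumably waits for the flush to be queued (bit 15 set) */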
if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
	nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
		 nv_rd32(subdev, 0x100c80), type);
}
spin_unlock_irqrestore(&priv->lock, flags);
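
/* nvc0_vmmgr_ctor(): describe the GF100 VM layout: a 40-bit address
 * space (limit = 1ULL << 40) with 4 KiB small pages (spg_shift = 12)
 * and 128 KiB large pages (lpg_shift = 17), then wire up the per-chip
 * map/unmap/flush hooks
 */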
*pobject = nv_object(priv);
priv->base.limit = 1ULL << 40;
priv->base.dma_bits = 40;
priv->base.pgt_bits = 27 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 17;
priv->base.create = nvc0_vm_create;
priv->base.map_pgt = nvc0_vm_map_pgt;
priv->base.map = nvc0_vm_map;
priv->base.map_sg = nvc0_vm_map_sg;
priv->base.unmap = nvc0_vm_unmap;
priv->base.flush = nvc0_vm_flush;
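
/* the constructor is then hooked into the subdev class's method table
 * (presumably the nouveau_ofuncs of the vmmgr oclass):
 */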
.ctor = nvc0_vmmgr_ctor,