41 u64 phys = 0xdeadcafe00000000ULL;  /* poison value written to the PDE when no page table is present */
45 phys = 0x00000003 | pgt[0]->addr;  /* valid PDE, small-page (4 KiB) table */
46 coverage = (pgt[0]->size >> 3) << 12;  /* PTE count * 4 KiB */
49 phys = 0x00000001 | pgt[1]->addr;  /* valid PDE, large-page (64 KiB) table */
50 coverage = (pgt[1]->size >> 3) << 16;  /* PTE count * 64 KiB */
54 if (coverage <= 32 * 1024 * 1024)
56 else if (coverage <= 64 * 1024 * 1024)
58 else if (coverage <= 128 * 1024 * 1024)
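/* The elided branch bodies (file lines 55-59) appear to OR additional bits
 * into phys so the PDE also records how much of the page table is actually
 * populated, keyed off the 32/64/128 MiB coverage thresholds above. */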
70 phys |= (u64)memtype << 40;
94 phys = vm_addr(vma, phys, mem->memtype, target);
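/* vm_addr() builds the PTE template for this mapping: line 70 above folds the
 * memory type into bits 40 and up of phys, presumably alongside the valid,
 * access and target-aperture bits set in the lines not shown here. */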
102 for (i = 7; i >= 0; i--) {
103 block = 1 << (i + 3);
104 if (cnt >= block && !(pte & (block - 1)))
107 offset_l |= (i << 7);
109 phys += block << (vma->node->type - 3);
112 u32 tag = mem->tag->offset + ((delta >> 16) * comp);  /* compression tag for the current 64 KiB chunk */
113 offset_h |= (tag << 17);
114 delta += block << (vma->node->type - 3);
118 nv_wo32(pgt, pte + 0, offset_l);
119 nv_wo32(pgt, pte + 4, offset_h);
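/* Example of the contiguous-block encoding above: pte, cnt and block are in
 * bytes (8 per PTE), as line 109's "block << (vma->node->type - 3)" implies.
 * A run of 32 PTEs (cnt = 256) starting at a 256-byte-aligned pte selects
 * i = 5, block = 256, and line 107 stores i in bits 7:9 of the PTE value,
 * presumably so the MMU can treat the 32 pages as one contiguous block. */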
145 nv_wo32(pgt, pte + 0, 0x00000000);
146 nv_wo32(pgt, pte + 4, 0x00000000);
173 nv_wr32(subdev, 0x100c80, (engine << 16) | 1);  /* kick a TLB flush for this engine */
174 if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))  /* wait for the flush bit to self-clear */
175 nv_error(subdev, "vm flush timeout: engine %d\n", engine);
176 spin_unlock_irqrestore(&priv->lock, flags);
199 *pobject = nv_object(priv);
203 priv->base.limit = 1ULL << 40;  /* 40-bit virtual address space */
204 priv->base.dma_bits = 40;
205 priv->base.pgt_bits = 29 - 12;
206 priv->base.spg_shift = 12;  /* 4 KiB small pages */
207 priv->base.lpg_shift = 16;  /* 64 KiB large pages */
208 priv->base.create = nv50_vm_create;
209 priv->base.map_pgt = nv50_vm_map_pgt;
210 priv->base.map = nv50_vm_map;
211 priv->base.map_sg = nv50_vm_map_sg;
212 priv->base.unmap = nv50_vm_unmap;
213 priv->base.flush = nv50_vm_flush;
222 .ctor = nv50_vmmgr_ctor,