/* NOTE(review): tail fragment of a BAR flush routine (presumably
 * nv50_bar_flush — the function header, lock acquisition, and 'flags'
 * declaration are outside this chunk; the leading "90"-style numbers
 * are extraction artifacts from the original file, not code). */
/* Kick the flush by writing bit 0 of 0x00330c, then poll until the
 * busy bit (bit 1) clears.  On timeout, warn but continue — the lock
 * is released unconditionally below. */
90 nv_wr32(priv, 0x00330c, 0x00000001);
91 if (!
nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
92 nv_warn(priv,
"flush timeout\n");
/* Release the spinlock taken earlier in this function (not visible here). */
93 spin_unlock_irqrestore(&priv->
lock, flags);
/* NOTE(review): tail fragment of a second flush variant (presumably the
 * nv84+ path — it writes through 'bar' at 0x070000 instead of 0x00330c;
 * the function header and lock acquisition are outside this chunk). */
/* Same pattern as the nv50 flush: write bit 0 to start, wait for bit 1
 * to clear, warn on timeout, then drop the lock.  Note the write goes
 * through 'bar' while the wait polls 'priv' — TODO confirm against the
 * upstream source that this asymmetry is intentional. */
102 nv_wr32(bar, 0x070000, 0x00000001);
103 if (!
nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
104 nv_warn(priv,
"flush timeout\n");
105 spin_unlock_irqrestore(&priv->
lock, flags);
/* NOTE(review): scattered fragments of the constructor (nv50_bar_ctor per
 * the oclass table later in this file).  The embedded line numbers jump
 * (121, 127, 132, 141, ...) so many intervening statements — error checks,
 * gpuobj/vm allocations — are MISSING from this view.  Do not treat this
 * span as a complete function. */
/* Publish the newly created object to the caller. */
121 *pobject = nv_object(priv);
/* The backing memory object doubles as the allocation heap parent. */
127 heap = nv_object(priv->
mem);
/* Chipset-dependent pad size (0x1400 vs 0x0200) — allocation call is
 * split across missing lines; confirm against upstream. */
132 0x1400 : 0x0200, 0, 0, &priv->
pad);
/* BAR3 (ramin) VM appears to start at 4GiB in the address space —
 * TODO confirm; surrounding vm creation is not visible. */
141 start = 0x0100000000ULL;
/* Hold an initial reference on the first page table. */
151 vm->
pgt[0].refcount[0] = 1;
/* DMA object setup for BAR3: 0x7fc00000 looks like the class/flags word
 * of an sc-DMA object — presumably followed by base/limit words at
 * offsets not visible here. */
164 nv_wo32(priv->
bar3, 0x00, 0x7fc00000);
169 nv_wo32(priv->
bar3, 0x10, 0x00000000);
170 nv_wo32(priv->
bar3, 0x14, 0x00000000);
/* BAR1 VM starts at address 0. */
173 start = 0x0000000000ULL;
/* Same DMA-object layout for BAR1. */
189 nv_wo32(priv->
bar1, 0x00, 0x7fc00000);
194 nv_wo32(priv->
bar1, 0x10, 0x00000000);
195 nv_wo32(priv->
bar1, 0x14, 0x00000000);
/* Wire up the subdev's public method table. */
198 priv->
base.kmap = nv50_bar_kmap;
199 priv->
base.umap = nv50_bar_umap;
200 priv->
base.unmap = nv50_bar_unmap;
202 priv->
base.flush = nv50_bar_flush;
/* NOTE(review): fragment of the destructor (nv50_bar_dtor per the oclass
 * table).  Passing NULL to nouveau_gpuobj_ref() drops the held reference
 * and clears the pointer.  Teardown runs in reverse order of creation:
 * BAR1 and BAR3 DMA objects, the BAR3 page table, then pgd/pad/mem.
 * Lines between the embedded numbers (214, 216, 218-219) are missing —
 * presumably vm teardown; confirm against upstream. */
213 nouveau_gpuobj_ref(
NULL, &priv->
bar1);
215 nouveau_gpuobj_ref(
NULL, &priv->
bar3);
217 nouveau_gpuobj_ref(
NULL, &priv->
bar3_vm->pgt[0].obj[0]);
220 nouveau_gpuobj_ref(
NULL, &priv->
pgd);
221 nouveau_gpuobj_ref(
NULL, &priv->
pad);
222 nouveau_gpuobj_ref(
NULL, &priv->
mem);
/* NOTE(review): fragment of nv50_bar_init (per the oclass table).  The
 * function header and return are outside this chunk. */
/* Pulse bit 8 of 0x000200 low then high — looks like a block reset
 * toggle for the BAR/VM unit; TODO confirm bit meaning. */
236 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
237 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
/* Program the instance base: write the address once without, then once
 * with, the 0x40000000 enable/valid bit — presumably a required
 * two-step latch sequence; confirm against upstream. */
240 nv_wr32(priv, 0x001704, 0x00000000 | priv->
mem->addr >> 12);
241 nv_wr32(priv, 0x001704, 0x40000000 | priv->
mem->addr >> 12);
/* Point the hardware at the BAR1/BAR3 DMA objects (offsets in 16-byte
 * units, hence >> 4), with bit 31 as the enable bit. */
242 nv_wr32(priv, 0x001708, 0x80000000 | priv->
bar1->node->offset >> 4);
243 nv_wr32(priv, 0x00170c, 0x80000000 | priv->
bar3->node->offset >> 4);
/* NOTE(review): interior of the subdev class-ops initializer (the struct
 * declaration open/close braces are outside this chunk).  Standard
 * nouveau object-lifecycle hooks: construct, destroy, init, fini. */
258 .ctor = nv50_bar_ctor,
259 .dtor = nv50_bar_dtor,
260 .init = nv50_bar_init,
261 .fini = nv50_bar_fini,