Linux Kernel 3.7.1
nvc0.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/device.h>
#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nvc0_vmmgr_priv {
	struct nouveau_vmmgr base;
	spinlock_t lock;
};

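/* Write one 8-byte page directory entry.  From the code below, the first
 * 32-bit word of a PDE points at the large-page page table (pgt[1]) and
 * the second at the small-page table (pgt[0]); each is stored as a
 * 256-byte-aligned address (addr >> 8) with bit 0 set to mark the table
 * present.
 */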
static void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
		struct nouveau_gpuobj *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	if (pgt[0])
		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
}

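/* Pack a physical address into PTE form.  Bit 0 marks the page present,
 * bit 1 is set for NV_MEM_ACCESS_SYS mappings, bits 32+ carry the target
 * memory space and bits 36+ the storage (mem)type.
 */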
static inline u64
nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys >>= 8;

	phys |= 0x00000001; /* present */
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= 0x00000002;

	phys |= ((u64)target << 32);
	phys |= ((u64)memtype << 36);

	return phys;
}

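/* Fill cnt contiguous PTEs backed by VRAM (target 0).  vma->node->type
 * holds log2 of the page size; because nvc0_vm_addr() stores the address
 * shifted right by 8, stepping the packed value by 1 << (type - 8)
 * advances the physical address by exactly one page per PTE.
 */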
static void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u32 next = 1 << (vma->node->type - 8);

	phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte  += 8;
	}
}

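/* Fill PTEs from a DMA address list (system memory).  Target 7 vs 5
 * appears to select the non-snooped vs snooped system-memory aperture,
 * matching the NV_MEM_ACCESS_NOSNOOP flag.
 */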
static void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;

	pte <<= 3;
	while (cnt--) {
		u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}

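/* Invalidate cnt PTEs by zeroing both words; clearing bit 0 drops the
 * present flag, so the range no longer translates.
 */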
static void
nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}

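/* Poke the TLB flush engine: the page directory address (shifted right
 * by 8) goes into 0x100cb8, then a write to 0x100cbc triggers a flush of
 * the given type.  nvc0_vm_flush() below passes type 1.
 */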
void
nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
{
	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
	unsigned long flags;

	/* looks like maybe a "free flush slots" counter, the
	 * faster you write to 0x100cbc the more it decreases
	 */
	spin_lock_irqsave(&priv->lock, flags);
	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
			 nv_rd32(subdev, 0x100c80), type);
	}

	nv_wr32(subdev, 0x100cb8, addr >> 8);
	nv_wr32(subdev, 0x100cbc, 0x80000000 | type);

	/* wait for flush to be queued? */
	if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
			 nv_rd32(subdev, 0x100c80), type);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

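/* Flush the TLBs for every page directory currently bound to this VM. */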
static void
nvc0_vm_flush(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
	}
}

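/* Create an address space covering [offset, offset + length); the 4096
 * here is the allocation granularity handed to the common
 * nouveau_vm_create() code.
 */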
static int
nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
	       u64 mm_offset, struct nouveau_vm **pvm)
{
	return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
}

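/* Constructor for the GF100 VM manager.  The parameters below describe a
 * 40-bit virtual address space with 4 KiB small pages and 128 KiB large
 * pages; pgt_bits = 27 - 12 means each PGD entry spans 1 << 27 bytes of
 * virtual address space.
 */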
static int
nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nvc0_vmmgr_priv *priv;
	int ret;

	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.limit = 1ULL << 40;
	priv->base.dma_bits = 40;
	priv->base.pgt_bits = 27 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 17;
	priv->base.create = nvc0_vm_create;
	priv->base.map_pgt = nvc0_vm_map_pgt;
	priv->base.map = nvc0_vm_map;
	priv->base.map_sg = nvc0_vm_map_sg;
	priv->base.unmap = nvc0_vm_unmap;
	priv->base.flush = nvc0_vm_flush;
	spin_lock_init(&priv->lock);
	return 0;
}

struct nouveau_oclass
nvc0_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_vmmgr_ctor,
		.dtor = _nouveau_vmmgr_dtor,
		.init = _nouveau_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
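/* For reference (a sketch, assuming the 3.7-era device tables in
 * engine/device/nvc0.c): the per-chipset setup binds this class with
 *
 *	device->oclass[NVDEV_SUBDEV_VM] = &nvc0_vmmgr_oclass;
 *
 * after which the core instantiates the subdev through .ofuncs, calling
 * nvc0_vmmgr_ctor() above.
 */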