Linux Kernel 3.7.1
base.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
#include <core/option.h>

#include <core/class.h>

#include <subdev/device.h>

static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);

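/* look up a previously-registered device by its 64-bit handle; returns NULL
 * if no such device exists.  the global nv_devices list is protected by
 * nv_devices_mutex.
 */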
struct nouveau_device *
nouveau_device_find(u64 name)
{
	struct nouveau_device *device, *match = NULL;
	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name) {
			match = device;
			break;
		}
	}
	mutex_unlock(&nv_devices_mutex);
	return match;
}
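
/* illustrative only: a caller that already holds a device's handle can
 * recover the nouveau_device pointer like this (hypothetical caller-side
 * snippet, "handle" supplied by the caller):
 *
 *	struct nouveau_device *device = nouveau_device_find(handle);
 *	if (!device)
 *		return -ENODEV;
 */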

/******************************************************************************
 * nouveau_devobj (0x0080): class implementation
 *****************************************************************************/
struct nouveau_devobj {
	struct nouveau_parent base;
	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
	bool created;
};

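/* disable_map[i] holds the NV_DEVICE_DISABLE_* bit(s) that a client can set
 * to keep the subdev/engine with internal index i from being instantiated;
 * the constructor below folds these into an internal per-subdev mask.
 */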
static const u64 disable_map[] = {
	/* per-subdev NV_DEVICE_DISABLE_* entries elided from this listing */
	[NVDEV_SUBDEV_NR] = 0,
};

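/* constructor for the 0x0080 device object a client creates to talk to a
 * device: validates the nv_device_class arguments, locates the requested
 * device, identifies the chipset and maps its registers on first use, and
 * then creates (or takes references on) the per-device subdevs/engines.
 */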
static int
nouveau_devobj_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_client *client = nv_client(parent);
	struct nouveau_device *device;
	struct nouveau_devobj *devobj;
	struct nv_device_class *args = data;
	u64 disable, boot0, strap;
	u64 mmio_base, mmio_size;
	void __iomem *map;
	int ret, i, c;

	if (size < sizeof(struct nv_device_class))
		return -EINVAL;

	/* find the device subdev that matches what the client requested */
	device = nv_device(client->device);
	if (args->device != ~0) {
		device = nouveau_device_find(args->device);
		if (!device)
			return -ENODEV;
	}

	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
				    (1ULL << NVDEV_ENGINE_FIFO) |
				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
	*pobject = nv_object(devobj);
	if (ret)
		return ret;

	mmio_base = pci_resource_start(device->pdev, 0);
	mmio_size = pci_resource_len(device->pdev, 0);

	/* translate api disable mask into internal mapping */
	disable = args->debug0;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->disable & disable_map[i])
			disable |= (1ULL << i);
	}

	/* identify the chipset, and determine classes of subdev/engines */
	if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
	    !device->card_type) {
		map = ioremap(mmio_base, 0x102000);
		if (map == NULL)
			return -ENOMEM;

		/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
		if (ioread32_native(map + 0x000004) != 0x00000000)
#else
		if (ioread32_native(map + 0x000004) == 0x00000000)
#endif
			iowrite32_native(0x01000001, map + 0x000004);

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);
		iounmap(map);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x0f000000) > 0) {
			device->chipset = (boot0 & 0xff00000) >> 20;
			switch (device->chipset & 0xf0) {
			case 0x10: device->card_type = NV_10; break;
			case 0x20: device->card_type = NV_20; break;
			case 0x30: device->card_type = NV_30; break;
			case 0x40:
			case 0x60: device->card_type = NV_40; break;
			case 0x50:
			case 0x80:
			case 0x90:
			case 0xa0: device->card_type = NV_50; break;
			case 0xc0: device->card_type = NV_C0; break;
			case 0xd0: device->card_type = NV_D0; break;
			case 0xe0: device->card_type = NV_E0; break;
			default:
				break;
			}
		} else
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
			else
				device->chipset = 0x04;
			device->card_type = NV_04;
		}

		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		case NV_10: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0:
		case NV_D0: ret = nvc0_identify(device); break;
		case NV_E0: ret = nve0_identify(device); break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret) {
			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
			return ret;
		}

		nv_info(device, "BOOT0 : 0x%08x\n", boot0);
		nv_info(device, "Chipset: %s (NV%02X)\n",
			device->cname, device->chipset);
		nv_info(device, "Family : NV%02X\n", device->card_type);

		/* determine frequency of timing crystal */
		if ( device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset <= 0x25))
			strap &= 0x00000040;
		else
			strap &= 0x00400040;

		switch (strap) {
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;
		}

		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
	}

	if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
	    !nv_subdev(device)->mmio) {
		nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
		if (!nv_subdev(device)->mmio) {
			nv_error(device, "unable to map device registers\n");
			return -ENOMEM;
		}
	}

	/* ensure requested subsystems are available for use */
	for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
			continue;

		if (!device->subdev[i]) {
			ret = nouveau_object_ctor(nv_object(device), NULL,
						  oclass, NULL, i,
						  &devobj->subdev[i]);
			if (ret == -ENODEV)
				continue;
			if (ret)
				return ret;

			if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
				nouveau_subdev_reset(devobj->subdev[i]);
		} else {
			nouveau_object_ref(device->subdev[i],
					   &devobj->subdev[i]);
		}

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with. devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nouveau_object *subdev = devobj->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}

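/* destructor: drop the references taken on every subdev object, in reverse
 * order of creation, then tear down the parent object.
 */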
static void
nouveau_devobj_dtor(struct nouveau_object *object)
{
	struct nouveau_devobj *devobj = (void *)object;
	int i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
		nouveau_object_ref(NULL, &devobj->subdev[i]);

	nouveau_parent_destroy(&devobj->base);
}

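/* init: called on object init/resume.  the first call merely marks the
 * object as created (its subdevs were already brought up by the constructor);
 * later calls re-increment each non-engine subdev, unwinding on failure.
 */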
static int
nouveau_devobj_init(struct nouveau_object *object)
{
	struct nouveau_devobj *devobj = (void *)object;
	struct nouveau_object *subdev;
	int ret, i;

	ret = nouveau_parent_init(&devobj->base);
	if (ret)
		return ret;

	for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret)
					goto fail;
			}
		}
	}

	devobj->created = true;
	return 0;

fail:
	for (--i; i >= 0; i--) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nouveau_object_dec(subdev, false);
		}
	}

	return ret;
}

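/* fini: decrement every non-engine subdev in reverse order.  when suspending,
 * a failure aborts the suspend and re-increments the subdevs that had already
 * been brought down.
 */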
static int
nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
{
	struct nouveau_devobj *devobj = (void *)object;
	struct nouveau_object *subdev;
	int ret, i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_dec(subdev, suspend);
				if (ret && suspend)
					goto fail;
			}
		}
	}

	ret = nouveau_parent_fini(&devobj->base, suspend);
fail:
	for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret) {
					/* XXX */
				}
			}
		}
	}

	return ret;
}

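/* register accessors exposed through the device object: all reads and writes
 * are forwarded to the mmio mapping owned by the underlying device engine.
 */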
static u8
nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
{
	return nv_rd08(object->engine, addr);
}

static u16
nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
{
	return nv_rd16(object->engine, addr);
}

static u32
nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
{
	return nv_rd32(object->engine, addr);
}

static void
nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
{
	nv_wr08(object->engine, addr, data);
}

static void
nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
{
	nv_wr16(object->engine, addr, data);
}

static void
nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	nv_wr32(object->engine, addr, data);
}

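/* object functions wired up for the 0x0080 device class */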
static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
	.ctor = nouveau_devobj_ctor,
	.dtor = nouveau_devobj_dtor,
	.init = nouveau_devobj_init,
	.fini = nouveau_devobj_fini,
	.rd08 = nouveau_devobj_rd08,
	.rd16 = nouveau_devobj_rd16,
	.rd32 = nouveau_devobj_rd32,
	.wr08 = nouveau_devobj_wr08,
	.wr16 = nouveau_devobj_wr16,
	.wr32 = nouveau_devobj_wr32,
};

/******************************************************************************
 * nouveau_device: engine functions
 *****************************************************************************/
struct nouveau_oclass
nouveau_device_sclass[] = {
	{ 0x0080, &nouveau_devobj_ofuncs },
	{}
};

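/* destructor for the device subdev itself: unlink it from the global device
 * list, release the register mapping if one was made, and destroy the base
 * subdev.
 */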
static void
nouveau_device_dtor(struct nouveau_object *object)
{
	struct nouveau_device *device = (void *)object;

	mutex_lock(&nv_devices_mutex);
	list_del(&device->head);
	mutex_unlock(&nv_devices_mutex);

	if (device->base.mmio)
		iounmap(device->base.mmio);

	nouveau_subdev_destroy(&device->base);
}

static struct nouveau_oclass
nouveau_device_oclass = {
	.handle = NV_SUBDEV(DEVICE, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
		.dtor = nouveau_device_dtor,
	},
};

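/* allocate and register a new device subdev keyed by "name" (the 64-bit
 * handle).  fails with -EEXIST if a device with the same handle has already
 * been registered.
 */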
int
nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
		       const char *cfg, const char *dbg,
		       int length, void **pobject)
{
	struct nouveau_device *device;
	int ret = -EEXIST;

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
				     "DEVICE", "device", length, pobject);
	device = *pobject;
	if (ret)
		goto done;

	atomic_set(&nv_object(device)->usecount, 2);
	device->pdev = pdev;
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
	list_add(&device->head, &nv_devices);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}
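
/* illustrative only: drivers typically reach this function through a
 * convenience wrapper rather than calling it directly.  assuming the usual
 * nouveau pattern of a nouveau_device_create() macro that supplies the
 * allocation size, a pci probe path might look roughly like this
 * ("handle", "config" and "debug" are caller-provided, hypothetical names):
 *
 *	ret = nouveau_device_create(pdev, handle, pci_name(pdev),
 *				    config, debug, &device);
 *	if (ret)
 *		return ret;
 */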