vmwgfx_drv.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

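/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */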
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)

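/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */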
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

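/**
 * Ioctl definitions.
 */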
static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These ioctls allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}

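/*
 * vmw_dummy_query_bo_prepare - Initialize a dummy query result structure
 * at the start of the dummy query buffer object.
 *
 * Idles the buffer (falling back to a device wait if that fails), maps its
 * first page, writes a pending occlusion query result there and unmaps it
 * again.
 */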
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else
		DRM_ERROR("Dummy query buffer map failed.\n");
	ttm_bo_unreserve(bo);
}

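/*
 * vmw_dummy_query_bo_create - Create the single-page buffer object used
 * to hold dummy query results. Returns the error code from
 * ttm_bo_create() on failure.
 */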
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, 0, false, NULL,
			     &dev_priv->dummy_query_bo);
}

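/*
 * vmw_request_device - Bring the device up: start the command FIFO, tell
 * the fence manager the FIFO is running, and create and prepare the dummy
 * query buffer object. Unwinds on failure.
 */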
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

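/*
 * Increase the 3d resource refcount. If the count was previously zero,
 * bring the device up; otherwise optionally unhide the SVGA device.
 */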
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

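/*
 * Decrease the 3d resource refcount. If the count drops to zero, release
 * the device; otherwise optionally hide the SVGA device.
 */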
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

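/*
 * vmw_get_initial_size - Determine the initial framebuffer size from the
 * host-suggested width and height, clamped below by the VMW_MIN_INITIAL_*
 * defines and falling back to them if the host values exceed the maximum
 * framebuffer dimensions.
 */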
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

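/*
 * vmw_driver_load - Main load-time entry point. Validates the SVGA device
 * ID, reads capabilities and memory layout, sets up TTM, maps MMIO, and
 * brings up the fence manager, KMS, overlay and (optionally) fbdev.
 */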
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
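		/*
		 * Request at least the mmio PCI resource.
		 */
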
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	/* Need to start the fifo to check if we can do screen objects */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D Depends on Screen Objects being used. */
	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ?
		 "" : "no ");

	/* We might be done with the fifo now */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

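/*
 * vmw_driver_unload - Tear the device down, roughly in the reverse order
 * of vmw_driver_load().
 */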
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;
	dev_priv->bdev.dev_mapping = dev->dev_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

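	/*
	 * Do nothing on the lastclose call from drm_unload.
	 */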
	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

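	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */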
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

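		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */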
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

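/**
 * These might not be needed with the virtual SVGA device.
 */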
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

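	/*
	 * Release the 3d reference held by fbdev and potentially
	 * stop the fifo.
	 */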
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

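	/*
	 * Reclaim the 3d reference held by fbdev and potentially
	 * start the fifo.
	 */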
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
	.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");