Linux Kernel  3.7.1
radeon_kms.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  * Alex Deucher
26  * Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include "radeon.h"
30 #include <drm/radeon_drm.h>
31 #include "radeon_asic.h"
32 
33 #include <linux/vga_switcheroo.h>
34 #include <linux/slab.h>
35 
47 int radeon_driver_unload_kms(struct drm_device *dev)
48 {
49  struct radeon_device *rdev = dev->dev_private;
50 
51  if (rdev == NULL)
52  return 0;
53  radeon_acpi_fini(rdev);
54  radeon_modeset_fini(rdev);
55  radeon_device_fini(rdev);
56  kfree(rdev);
57  dev->dev_private = NULL;
58  return 0;
59 }
60 
74 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
75 {
76  struct radeon_device *rdev;
77  int r, acpi_status;
78 
79  rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
80  if (rdev == NULL) {
81  return -ENOMEM;
82  }
83  dev->dev_private = (void *)rdev;
84 
85  /* update BUS flag */
86  if (drm_pci_device_is_agp(dev)) {
87  flags |= RADEON_IS_AGP;
88  } else if (pci_is_pcie(dev->pdev)) {
89  flags |= RADEON_IS_PCIE;
90  } else {
91  flags |= RADEON_IS_PCI;
92  }
93 
94  /* radeon_device_init should report only fatal errors
95  * (memory allocation, iomapping or memory manager
96  * initialization failures); it must still properly
97  * initialize the GPU MC controller and permit
98  * VRAM allocation
99  */
100  r = radeon_device_init(rdev, dev, dev->pdev, flags);
101  if (r) {
102  dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
103  goto out;
104  }
105 
106  /* Again, modeset_init should fail only on fatal errors;
107  * otherwise it should provide enough functionality
108  * for shadowfb to run
109  */
110  r = radeon_modeset_init(rdev);
111  if (r)
112  dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
113 
114  /* Call ACPI methods: they require modeset init,
115  * but failure is not fatal
116  */
117  if (!r) {
118  acpi_status = radeon_acpi_init(rdev);
119  if (acpi_status)
120  dev_dbg(&dev->pdev->dev,
121  "Error during ACPI methods call\n");
122  }
123 
124 out:
125  if (r)
126  radeon_driver_unload_kms(dev);
127  return r;
128 }
129 
140 static void radeon_set_filp_rights(struct drm_device *dev,
141  struct drm_file **owner,
142  struct drm_file *applier,
143  uint32_t *value)
144 {
145  mutex_lock(&dev->struct_mutex);
146  if (*value == 1) {
147  /* wants rights */
148  if (!*owner)
149  *owner = applier;
150  } else if (*value == 0) {
151  /* revokes rights */
152  if (*owner == applier)
153  *owner = NULL;
154  }
155  *value = *owner == applier ? 1 : 0;
156  mutex_unlock(&dev->struct_mutex);
157 }
158 
159 /*
160  * Userspace get information ioctl
161  */
174 int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
175 {
176  struct radeon_device *rdev = dev->dev_private;
177  struct drm_radeon_info *info = data;
178  struct radeon_mode_info *minfo = &rdev->mode_info;
179  uint32_t value, *value_ptr;
180  uint64_t value64, *value_ptr64;
181  struct drm_crtc *crtc;
182  int i, found;
183 
184  /* TIMESTAMP is a 64-bit value, needs special handling. */
185  if (info->request == RADEON_INFO_TIMESTAMP) {
186  if (rdev->family >= CHIP_R600) {
187  value_ptr64 = (uint64_t*)((unsigned long)info->value);
188  if (rdev->family >= CHIP_TAHITI) {
189  value64 = si_get_gpu_clock(rdev);
190  } else {
191  value64 = r600_get_gpu_clock(rdev);
192  }
193 
194  if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
195  DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
196  return -EFAULT;
197  }
198  return 0;
199  } else {
200  DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
201  return -EINVAL;
202  }
203  }
204 
205  value_ptr = (uint32_t *)((unsigned long)info->value);
206  if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
207  DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
208  return -EFAULT;
209  }
210 
211  switch (info->request) {
212  case RADEON_INFO_DEVICE_ID:
213  value = dev->pci_device;
214  break;
215  case RADEON_INFO_NUM_GB_PIPES:
216  value = rdev->num_gb_pipes;
217  break;
218  case RADEON_INFO_NUM_Z_PIPES:
219  value = rdev->num_z_pipes;
220  break;
221  case RADEON_INFO_ACCEL_WORKING:
222  /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
223  if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
224  value = false;
225  else
226  value = rdev->accel_working;
227  break;
228  case RADEON_INFO_CRTC_FROM_ID:
229  for (i = 0, found = 0; i < rdev->num_crtc; i++) {
230  crtc = (struct drm_crtc *)minfo->crtcs[i];
231  if (crtc && crtc->base.id == value) {
232  struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
233  value = radeon_crtc->crtc_id;
234  found = 1;
235  break;
236  }
237  }
238  if (!found) {
239  DRM_DEBUG_KMS("unknown crtc id %d\n", value);
240  return -EINVAL;
241  }
242  break;
243  case RADEON_INFO_ACCEL_WORKING2:
244  value = rdev->accel_working;
245  break;
246  case RADEON_INFO_TILING_CONFIG:
247  if (rdev->family >= CHIP_TAHITI)
248  value = rdev->config.si.tile_config;
249  else if (rdev->family >= CHIP_CAYMAN)
250  value = rdev->config.cayman.tile_config;
251  else if (rdev->family >= CHIP_CEDAR)
252  value = rdev->config.evergreen.tile_config;
253  else if (rdev->family >= CHIP_RV770)
254  value = rdev->config.rv770.tile_config;
255  else if (rdev->family >= CHIP_R600)
256  value = rdev->config.r600.tile_config;
257  else {
258  DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
259  return -EINVAL;
260  }
261  break;
263  /* The "value" here is both an input and output parameter.
264  * If the input value is 1, filp requests hyper-z access.
265  * If the input value is 0, filp revokes its hyper-z access.
266  *
267  * When returning, the value is 1 if filp owns hyper-z access,
268  * 0 otherwise. */
269  if (value >= 2) {
270  DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
271  return -EINVAL;
272  }
273  radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
274  break;
275  case RADEON_INFO_WANT_CMASK:
276  /* The same logic as Hyper-Z. */
277  if (value >= 2) {
278  DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
279  return -EINVAL;
280  }
281  radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
282  break;
283  case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
284  /* return clock value in KHz */
285  value = rdev->clock.spll.reference_freq * 10;
286  break;
287  case RADEON_INFO_NUM_BACKENDS:
288  if (rdev->family >= CHIP_TAHITI)
289  value = rdev->config.si.max_backends_per_se *
290  rdev->config.si.max_shader_engines;
291  else if (rdev->family >= CHIP_CAYMAN)
292  value = rdev->config.cayman.max_backends_per_se *
293  rdev->config.cayman.max_shader_engines;
294  else if (rdev->family >= CHIP_CEDAR)
295  value = rdev->config.evergreen.max_backends;
296  else if (rdev->family >= CHIP_RV770)
297  value = rdev->config.rv770.max_backends;
298  else if (rdev->family >= CHIP_R600)
299  value = rdev->config.r600.max_backends;
300  else {
301  return -EINVAL;
302  }
303  break;
304  case RADEON_INFO_NUM_TILE_PIPES:
305  if (rdev->family >= CHIP_TAHITI)
306  value = rdev->config.si.max_tile_pipes;
307  else if (rdev->family >= CHIP_CAYMAN)
308  value = rdev->config.cayman.max_tile_pipes;
309  else if (rdev->family >= CHIP_CEDAR)
310  value = rdev->config.evergreen.max_tile_pipes;
311  else if (rdev->family >= CHIP_RV770)
312  value = rdev->config.rv770.max_tile_pipes;
313  else if (rdev->family >= CHIP_R600)
314  value = rdev->config.r600.max_tile_pipes;
315  else {
316  return -EINVAL;
317  }
318  break;
319  case RADEON_INFO_FUSION_GART_WORKING:
320  value = 1;
321  break;
322  case RADEON_INFO_BACKEND_MAP:
323  if (rdev->family >= CHIP_TAHITI)
324  value = rdev->config.si.backend_map;
325  else if (rdev->family >= CHIP_CAYMAN)
326  value = rdev->config.cayman.backend_map;
327  else if (rdev->family >= CHIP_CEDAR)
328  value = rdev->config.evergreen.backend_map;
329  else if (rdev->family >= CHIP_RV770)
330  value = rdev->config.rv770.backend_map;
331  else if (rdev->family >= CHIP_R600)
332  value = rdev->config.r600.backend_map;
333  else {
334  return -EINVAL;
335  }
336  break;
337  case RADEON_INFO_VA_START:
338  /* this is where we report if vm is supported or not */
339  if (rdev->family < CHIP_CAYMAN)
340  return -EINVAL;
341  value = RADEON_VA_RESERVED_SIZE;
342  break;
343  case RADEON_INFO_IB_VM_MAX_SIZE:
344  /* this is where we report if vm is supported or not */
345  if (rdev->family < CHIP_CAYMAN)
346  return -EINVAL;
347  value = RADEON_IB_VM_MAX_SIZE;
348  break;
349  case RADEON_INFO_MAX_PIPES:
350  if (rdev->family >= CHIP_TAHITI)
351  value = rdev->config.si.max_cu_per_sh;
352  else if (rdev->family >= CHIP_CAYMAN)
353  value = rdev->config.cayman.max_pipes_per_simd;
354  else if (rdev->family >= CHIP_CEDAR)
355  value = rdev->config.evergreen.max_pipes;
356  else if (rdev->family >= CHIP_RV770)
357  value = rdev->config.rv770.max_pipes;
358  else if (rdev->family >= CHIP_R600)
359  value = rdev->config.r600.max_pipes;
360  else {
361  return -EINVAL;
362  }
363  break;
364  default:
365  DRM_DEBUG_KMS("Invalid request %d\n", info->request);
366  return -EINVAL;
367  }
368  if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
369  DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
370  return -EFAULT;
371  }
372  return 0;
373 }
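Userspace reaches this ioctl through DRM_IOCTL_RADEON_INFO; most requests pass a 32-bit value by pointer, while RADEON_INFO_TIMESTAMP (handled at the top of the function) returns the 64-bit GPU clock counter on r6xx and newer parts. A minimal, illustrative query sketch, assuming libdrm and the definitions from drm/radeon_drm.h (the helper name is invented for this sketch):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>

/* Hypothetical helper: read the 64-bit GPU clock counter (r6xx+ only). */
static int radeon_query_timestamp(int fd, uint64_t *ts)
{
        struct drm_radeon_info info = {0};

        info.request = RADEON_INFO_TIMESTAMP;
        info.value = (uintptr_t)ts;     /* kernel copies 64 bits back here */

        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}

On pre-r600 hardware the call fails with -EINVAL, matching the DRM_DEBUG_KMS branch above.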
374 
375 
376 /*
377  * Outdated mess for old drm with Xorg being in charge (void function now).
378  */
387 int radeon_driver_firstopen_kms(struct drm_device *dev)
388 {
389  return 0;
390 }
391 
399 void radeon_driver_lastclose_kms(struct drm_device *dev)
400 {
401  vga_switcheroo_process_delayed_switch();
402 }
403 
413 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
414 {
415  struct radeon_device *rdev = dev->dev_private;
416 
417  file_priv->driver_priv = NULL;
418 
419  /* newer GPUs have virtual address space support */
420  if (rdev->family >= CHIP_CAYMAN) {
421  struct radeon_fpriv *fpriv;
422  struct radeon_bo_va *bo_va;
423  int r;
424 
425  fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
426  if (unlikely(!fpriv)) {
427  return -ENOMEM;
428  }
429 
430  radeon_vm_init(rdev, &fpriv->vm);
431 
432  /* map the ib pool buffer read only into
433  * virtual address space */
434  bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
435  rdev->ring_tmp_bo.bo);
436  r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
437  RADEON_VM_PAGE_READABLE |
438  RADEON_VM_PAGE_SNOOPED);
439  if (r) {
440  radeon_vm_fini(rdev, &fpriv->vm);
441  kfree(fpriv);
442  return r;
443  }
444 
445  file_priv->driver_priv = fpriv;
446  }
447  return 0;
448 }
449 
458 void radeon_driver_postclose_kms(struct drm_device *dev,
459  struct drm_file *file_priv)
460 {
461  struct radeon_device *rdev = dev->dev_private;
462 
463  /* newer GPUs have virtual address space support */
464  if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
465  struct radeon_fpriv *fpriv = file_priv->driver_priv;
466  struct radeon_bo_va *bo_va;
467  int r;
468 
469  r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
470  if (!r) {
471  bo_va = radeon_vm_bo_find(&fpriv->vm,
472  rdev->ring_tmp_bo.bo);
473  if (bo_va)
474  radeon_vm_bo_rmv(rdev, bo_va);
475  radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
476  }
477 
478  radeon_vm_fini(rdev, &fpriv->vm);
479  kfree(fpriv);
480  file_priv->driver_priv = NULL;
481  }
482 }
483 
493 void radeon_driver_preclose_kms(struct drm_device *dev,
494  struct drm_file *file_priv)
495 {
496  struct radeon_device *rdev = dev->dev_private;
497  if (rdev->hyperz_filp == file_priv)
498  rdev->hyperz_filp = NULL;
499  if (rdev->cmask_filp == file_priv)
500  rdev->cmask_filp = NULL;
501 }
502 
503 /*
504  * VBlank related functions.
505  */
515 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
516 {
517  struct radeon_device *rdev = dev->dev_private;
518 
519  if (crtc < 0 || crtc >= rdev->num_crtc) {
520  DRM_ERROR("Invalid crtc %d\n", crtc);
521  return -EINVAL;
522  }
523 
524  return radeon_get_vblank_counter(rdev, crtc);
525 }
526 
536 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
537 {
538  struct radeon_device *rdev = dev->dev_private;
539  unsigned long irqflags;
540  int r;
541 
542  if (crtc < 0 || crtc >= rdev->num_crtc) {
543  DRM_ERROR("Invalid crtc %d\n", crtc);
544  return -EINVAL;
545  }
546 
547  spin_lock_irqsave(&rdev->irq.lock, irqflags);
548  rdev->irq.crtc_vblank_int[crtc] = true;
549  r = radeon_irq_set(rdev);
550  spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
551  return r;
552 }
553 
562 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
563 {
564  struct radeon_device *rdev = dev->dev_private;
565  unsigned long irqflags;
566 
567  if (crtc < 0 || crtc >= rdev->num_crtc) {
568  DRM_ERROR("Invalid crtc %d\n", crtc);
569  return;
570  }
571 
572  spin_lock_irqsave(&rdev->irq.lock, irqflags);
573  rdev->irq.crtc_vblank_int[crtc] = false;
574  radeon_irq_set(rdev);
575  spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
576 }
577 
591 int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
592  int *max_error,
593  struct timeval *vblank_time,
594  unsigned flags)
595 {
596  struct drm_crtc *drmcrtc;
597  struct radeon_device *rdev = dev->dev_private;
598 
599  if (crtc < 0 || crtc >= dev->num_crtcs) {
600  DRM_ERROR("Invalid crtc %d\n", crtc);
601  return -EINVAL;
602  }
603 
604  /* Get associated drm_crtc: */
605  drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
606 
607  /* Helper routine in DRM core does all the work: */
608  return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
609  vblank_time, flags,
610  drmcrtc);
611 }
612 
613 /*
614  * IOCTL.
615  */
616 int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
617  struct drm_file *file_priv)
618 {
619  /* Not valid in KMS. */
620  return -EINVAL;
621 }
622 
623 #define KMS_INVALID_IOCTL(name) \
624 int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
625 { \
626  DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
627  return -EINVAL; \
628 }
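Each KMS_INVALID_IOCTL() invocation below stamps out a stub that logs and rejects the corresponding legacy (pre-KMS) ioctl. For example, KMS_INVALID_IOCTL(radeon_cp_init_kms) expands to roughly:

int radeon_cp_init_kms(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        DRM_ERROR("invalid ioctl with kms %s\n", __func__);
        return -EINVAL;
}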
629 
630 /*
631  * All these ioctls are invalid in kms world.
632  */
633 KMS_INVALID_IOCTL(radeon_cp_init_kms)
634 KMS_INVALID_IOCTL(radeon_cp_start_kms)
635 KMS_INVALID_IOCTL(radeon_cp_stop_kms)
636 KMS_INVALID_IOCTL(radeon_cp_reset_kms)
637 KMS_INVALID_IOCTL(radeon_cp_idle_kms)
638 KMS_INVALID_IOCTL(radeon_cp_resume_kms)
639 KMS_INVALID_IOCTL(radeon_engine_reset_kms)
640 KMS_INVALID_IOCTL(radeon_fullscreen_kms)
641 KMS_INVALID_IOCTL(radeon_cp_swap_kms)
642 KMS_INVALID_IOCTL(radeon_cp_clear_kms)
643 KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
644 KMS_INVALID_IOCTL(radeon_cp_indices_kms)
645 KMS_INVALID_IOCTL(radeon_cp_texture_kms)
646 KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
647 KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
648 KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
649 KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
650 KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
651 KMS_INVALID_IOCTL(radeon_cp_flip_kms)
652 KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
653 KMS_INVALID_IOCTL(radeon_mem_free_kms)
654 KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
655 KMS_INVALID_IOCTL(radeon_irq_emit_kms)
656 KMS_INVALID_IOCTL(radeon_irq_wait_kms)
657 KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
658 KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
659 KMS_INVALID_IOCTL(radeon_surface_free_kms)
660 
661 
662 struct drm_ioctl_desc radeon_ioctls_kms[] = {
663  DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
664  DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
665  DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
666  DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
667  DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
668  DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
669  DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
670  DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
671  DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
672  DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
673  DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
674  DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
675  DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
676  DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
677  DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
678  DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
679  DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
680  DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
681  DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
682  DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
683  DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
684  DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
685  DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
686  DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
687  DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
688  DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
689  DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
690  /* KMS */
691  DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
692  DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
693  DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
694  DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
695  DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
696  DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
697  DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
698  DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
699  DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
700  DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
701  DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
702  DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
703  DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
704 };
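This table is what the DRM core dispatches into, but the driver still has to advertise it (together with the vblank callbacks defined earlier in this file). A sketch of that hookup, assuming the 3.7-era struct drm_driver fields and the DRM_ARRAY_SIZE() helper from drmP.h; this is abbreviated and illustrative, the real definition lives in radeon_drv.c and assigns num_ioctls at init time:

static struct drm_driver kms_driver_sketch = {
        /* ... feature flags, fops and mode-setting hooks omitted ... */
        .get_vblank_counter   = radeon_get_vblank_counter_kms,
        .enable_vblank        = radeon_enable_vblank_kms,
        .disable_vblank       = radeon_disable_vblank_kms,
        .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
        .ioctls               = radeon_ioctls_kms,
        .num_ioctls           = DRM_ARRAY_SIZE(radeon_ioctls_kms),
};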