Linux Kernel 3.7.1
radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

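/*
 * Never called in practice: radeon allocates GEM objects through
 * radeon_gem_object_create() below, so hitting this init callback
 * indicates a driver bug (hence the BUG()).
 */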
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

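/*
 * Create a GEM-wrapped radeon buffer object: the size is capped at
 * min(visible VRAM, GTT size), the allocation is retried in GTT when
 * VRAM is exhausted, and the new BO is tracked on rdev->gem.objects.
 */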
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

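/*
 * Legacy domain helper: a write domain takes precedence over a read
 * domain, and only RADEON_GEM_DOMAIN_CPU does real work today (it
 * waits for the BO to go idle); other domains are a no-op pending the
 * reimplementation noted in the FIXME below.
 */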
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the
 * new and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

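/*
 * Counterpart to radeon_gem_object_open(): drops one bo_va reference
 * and removes the VM mapping once the last handle in this file's VM
 * is closed. If the BO cannot be reserved, the bo_va is deliberately
 * leaked (and logged) rather than freed while possibly still in use.
 */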
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

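/*
 * -EDEADLK from the wait paths signals a GPU lockup: attempt a GPU
 * reset and, if it succeeds, return -EAGAIN so userspace retries the
 * ioctl.
 */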
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
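/*
 * The info ioctl reports conservative sizes to userspace: visible VRAM
 * minus the stolen VGA memory and fbdev consumption, and GTT minus the
 * ring buffers and IB pool.
 */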
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

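/*
 * mmap of a radeon BO goes through a fake offset into the DRM/TTM
 * address space: userspace mmap()s the device node at the offset
 * returned here instead of mapping the object directly.
 */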
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

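/*
 * Non-blocking busy query: radeon_bo_wait() with no_wait set only
 * samples the fence state, and the BO's current TTM placement is
 * translated back into a RADEON_GEM_DOMAIN_* value for userspace.
 */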
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

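/*
 * Tiling flags are recorded on the BO; the command-stream checker and
 * display code consult them later to validate surface layout.
 */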
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

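/*
 * GPU virtual address ioctl (CAYMAN and newer): lets userspace map or
 * unmap a BO at a chosen offset inside its per-file VM. Only snooped
 * (CPU-coherent) mappings are accepted for now, and offsets below
 * RADEON_VA_RESERVED_SIZE are rejected.
 */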
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way we can start using these fields later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

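/*
 * Dumb buffers back unaccelerated KMS scanout: the pitch is aligned to
 * the hardware requirement for the given bpp and the BO is placed in
 * VRAM.
 */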
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}