/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

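/**
 * Compute the page protection bits for an I/O map, applying the
 * architecture-specific caching attributes for the given map type.
 */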
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

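/**
 * Compute the page protection bits for a DMA-backed map; platforms with
 * non-coherent caches need these pages mapped uncached.
 */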
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

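/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page to
 * map, get the page, increment the use count and return it.
 */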
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif /* __OS_HAS_AGP */

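/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the map, find the real physical page behind the vmalloc'ed handle,
 * grab a reference and hand the page back through \p vmf.
 */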
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

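/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes the map information if we are the last
 * person to close the mapping and it's not in the global maplist.
 */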
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

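/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */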
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

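/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get the page from
 * drm_sg_mem::pagelist.
 */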
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

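/* Thin wrappers hooking the per-type fault handlers into vm_operations. */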
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

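/** AGP virtual memory operations */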
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

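/** Shared virtual memory operations */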
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

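/** DMA virtual memory operations */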
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

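/** Scatter-gather virtual memory operations */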
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

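/**
 * \c open method for shared virtual memory.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist. The caller must hold dev->struct_mutex.
 */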
void drm_vm_open_locked(struct drm_device *dev,
			struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

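/** Takes dev->struct_mutex and calls drm_vm_open_locked(); used as the
 *  vm_operations_struct::open callback. */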
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

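/** Unlink and free the drm_vma_entry for \p vma. The caller must hold
 *  dev->struct_mutex. */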
void drm_vm_close_locked(struct drm_device *dev,
			 struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

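/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */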
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

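/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Checks that the mapping covers the whole DMA buffer area, enforces
 * read-only protection where required, and sets the virtual memory area
 * operations structure to drm_vm_dma_ops.
 */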
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

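/**
 * Bus offset added to map offsets when remapping registers or the
 * framebuffer: the dense memory base on Alpha, zero elsewhere.
 */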
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

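/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a
 * DMA area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the _DRM_RESTRICTED flag and the size are checked,
 * the virtual memory operations are set according to the map type, and the
 * pages are remapped. The caller must hold dev->struct_mutex.
 */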
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the bus
			 * address directly, so for memory of type _DRM_AGP
			 * we sort out the real physical pages and mappings
			 * in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

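/**
 * Entry point for mmap on DRM device files: fails with -ENODEV if the
 * device has been unplugged, otherwise takes dev->struct_mutex and
 * dispatches to drm_mmap_locked().
 */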
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
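EXPORT_SYMBOL(drm_mmap);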