#include <linux/export.h>
#include <linux/slab.h>
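/*
 * Architecture-specific choice of page protection bits for I/O maps
 * (registers and framebuffer): caching is disabled or write-combined
 * depending on what the CPU and map type allow.
 */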
#if defined(__i386__) || defined(__x86_64__)
#elif defined(__powerpc__)
#elif defined(__ia64__)
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
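/*
 * For DMA mappings only non-cache-coherent powerpc needs a special
 * (no-cache) page protection; every other architecture keeps the
 * protection derived from the vma flags.
 */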
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
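/**
 * fault handler for AGP virtual memory.
 *
 * Look up the map for the faulting address and, when it is AGP memory
 * that cannot be reached through the aperture, walk dev->agp->memory for
 * the bound region containing the bus address and hand back the real
 * physical page behind it.
 */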
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;

        if (!drm_core_has_AGP(dev))

        if (!dev->agp || !dev->agp->cant_use_aperture)

                struct drm_agp_mem *agpmem;

                /* on alpha, adjust to a bus-relative address */
                baddr -= dev->hose->mem_space->start;

                if (agpmem->bound <= baddr &&
                    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)

                if (&agpmem->head == &dev->agp->memory)

                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;

                DRM_DEBUG("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                          (unsigned long long)baddr,
                          agpmem->memory->pages[offset],
                          (unsigned long long)offset,
                          page_count(page));

        return VM_FAULT_SIGBUS;
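/* Fallback when the kernel is built without AGP support: the fault handler simply refuses the access. */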
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
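/**
 * fault handler for shared (SHM) virtual memory.
 *
 * Translate the faulting address into an offset into the vmalloc()ed map
 * handle and hand back the page that backs it.
 */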
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)

                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;

                return VM_FAULT_SIGBUS;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
/**
 * close method for shared virtual memory (drm_vm_shm_close).
 *
 * Count the remaining mappings of this map; if this was the last one and
 * the map is removable and no longer on the device maplist, release the
 * underlying resource (MTRR, ioremap, vmalloc or consistent memory)
 * according to the map type.
 */
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

                if (pt->vma->vm_private_data == map)
                if (pt->vma == vma) {

                        if (r_list->map == map)

                        drm_dma_handle_t dmah;

                        if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                DRM_DEBUG("mtrr_del = %d\n", retcode);

                        dmah.vaddr = map->handle;
                        dmah.busaddr = map->offset;
                        dmah.size = map->size;

                        DRM_ERROR("tried to rmmap GEM object\n");
/**
 * fault handler for DMA virtual memory.
 *
 * Compute the page number from the fault offset and return the matching
 * page from the DMA buffer pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device_dma *dma = dev->dma;
        unsigned long page_nr;

                return VM_FAULT_SIGBUS;
                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
/**
 * fault handler for scatter-gather virtual memory.
 *
 * Combine the fault offset with the map's offset into the scatter-gather
 * area and return the page from the scatter-gather pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long map_offset;

                return VM_FAULT_SIGBUS;
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
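/* Thin wrappers that dispatch the generic .fault callback to the type-specific handlers above. */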
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}
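/*
 * Per-map-type vm_operations tables. All four register the mapping with
 * the device on open and unregister it on close; only the .fault handler
 * (and, for SHM maps, the .close handler) differs.
 */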
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};
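/*
 * open method for all mapping types: record the new vma in a
 * drm_vma_entry on dev->vmalist so the driver can enumerate the
 * userspace mappings of the device.
 */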
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

                vma_entry->vma = vma;
                list_add(&vma_entry->head, &dev->vmalist);
        struct drm_file *priv = vma->vm_file->private_data;
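/*
 * close method for all mapping types: find this vma's drm_vma_entry on
 * dev->vmalist, unlink it and free it.
 */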
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

                if (pt->vma == vma) {
        struct drm_file *priv = vma->vm_file->private_data;
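/**
 * mmap DMA memory (drm_mmap_dma).
 *
 * Check that the requested length matches the DMA buffer area exactly,
 * drop write permission when the buffers are mapped PCI read-only, then
 * install drm_vm_dma_ops and register the mapping with the device.
 */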
        struct drm_device_dma *dma;

        dev = priv->minor->dev;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
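/* On Alpha the register/framebuffer maps live above the hose's dense memory base; elsewhere this extra offset is zero. */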
        return dev->hose->dense_mem_base;
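/**
 * mmap a DRM map.
 *
 * A vma with no page offset is a DMA mapping and is handed to
 * drm_mmap_dma(). Otherwise the map is looked up by vm_pgoff, the
 * caller's permissions are checked, and the vm_ops and page protection
 * are chosen according to the map type (AGP, registers/framebuffer,
 * SHM/consistent, scatter-gather).
 */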
        struct drm_local_map *map = NULL;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)

                return drm_mmap_dma(filp, vma);

                DRM_ERROR("Could not find map\n");

                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)

#if !defined(__arm__)
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
#if defined(__powerpc__)
                        vma->vm_ops = &drm_vm_ops;

                offset = drm_core_get_reg_ofs(dev);

                DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end,
                          (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;

                vma->vm_ops = &drm_vm_shm_ops;

                vma->vm_ops = &drm_vm_sg_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
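/*
 * mmap() entry point: refuse to map anything on a device that has been
 * unplugged, otherwise take the DRM mutex and do the real work in the
 * locked helper above.
 */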
        if (drm_device_is_unplugged(dev))