#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
static int limit = 1024*1024;

static int use_ptemod;
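/*
 * "limit" caps the total number of grant pages that may be mapped at once;
 * use_ptemod is set at init time on paravirtualized domains, where foreign
 * grants must be mapped by rewriting the PTEs of the user VMA rather than
 * by wiring them into ordinary kernel pages.
 */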
struct gntdev_priv {
	/* ... */
	struct mmu_notifier mn;
};
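/*
 * The mmu notifier embedded in each gntdev_priv is registered against the
 * opener's mm; its callbacks (gntdev_mmu_ops, below) unmap affected grant
 * ranges before the hosting address space is invalidated or torn down.
 */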
static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
			 map->index, map->count,
			 map->index == text_index && text ? text : "");
}
static void gntdev_free_map(struct grant_map *map)
{
	/* ... release map->pages and the per-page grant/op arrays ... */
}

	/* in gntdev_alloc_map(): allocate per-page state; every failure
	 * path funnels into gntdev_free_map() */
	for (i = 0; i < count; i++) {
		/* ... */
	}
	/* ... */
	gntdev_free_map(add);
	/* in gntdev_add_map(): */
	gntdev_print_maps(priv, "[new]", add->index);
static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
					       int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}
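/*
 * Maps are reference-counted; gntdev_put_map() drops a reference and, on
 * the last put, does the real teardown: send the unmap notification if
 * one was armed, return the grant handles to the hypervisor, then free
 * the backing pages.
 */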
static void gntdev_put_map(struct grant_map *map)
{
	/* ... last-reference checks ... */
	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
		notify_remote_via_evtchn(map->notify.event);
	/* ... */
	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}
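/*
 * find_grant_ptes() is the apply_to_page_range() callback used by
 * gntdev_mmap() under use_ptemod: it records the machine address of each
 * user PTE into a prepared map_op/unmap_op pair, so the hypervisor can
 * later point those PTEs at the granted frames in one batched call.
 */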
static int find_grant_ptes(pte_t *pte, pgtable_t token,
			   unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref, map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}
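/*
 * map_grant_pages() builds and issues the batched grant-map hypercall.
 * Without PTE modification (auto-translated domains) the grants are mapped
 * straight onto the kernel addresses of the preallocated pages; with
 * use_ptemod, extra kmap_ops carrying GNTMAP_contains_pte are needed so
 * the hypervisor also fixes up the kernel-side PTEs.
 */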
static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			/* ... set up map_ops[i] for addr ... */
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/* PV: also map the grants into the kernel's page tables */
		for (i = 0; i < map->count; i++) {
			/* ... compute pte_maddr of the kernel PTE ... */
			gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
					  map->flags | GNTMAP_host_map |
					  GNTMAP_contains_pte,
					  map->grants[i].ref,
					  map->grants[i].domid);
		}
	}
	/* ... issue the batched map hypercall ... */
	for (i = 0; i < map->count; i++) {
		/* ... check per-op status, record returned handles ... */
	}
	return err;
}
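/*
 * Unmapping mirrors the above in two layers: __unmap_grant_pages() hands
 * one contiguous run of live handles to the hypervisor (clearing the
 * unmap-notify byte first if it falls inside the run), while
 * unmap_grant_pages() below splits a request around holes left by earlier
 * partial unmaps.
 */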
	/* in __unmap_grant_pages(): clear the notify byte first, if its
	 * page (pgno) lies inside the range being unmapped */
	if (pgno >= offset && pgno < offset + pages && use_ptemod) {
		/* ... clear it through the user mapping ... */
	} else if (pgno >= offset && pgno < offset + pages) {
		/* ... clear it through a kernel mapping (kmap) ... */
	}
	/* ... batched unmap hypercall ... */
	for (i = 0; i < pages; i++) {
		/* ... check per-op status, invalidate the handles ... */
	}
static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	/* The range may contain holes where grants were already unmapped;
	 * only hand contiguous runs of live handles to __unmap_grant_pages().
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}
	return err;
}
static void gntdev_vma_open(struct vm_area_struct *vma)
{
	pr_debug("gntdev_vma_open %p\n", vma);
	/* ... take a reference on the map attached to the vma ... */
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	pr_debug("gntdev_vma_close %p\n", vma);
	/* ... detach the map from the vma and drop its reference ... */
}

static struct vm_operations_struct gntdev_vmops = {
	.open  = gntdev_vma_open,
	.close = gntdev_vma_close,
};
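/*
 * vma open/close keep the grant_map's reference count in step with vma
 * duplication and teardown; the mmu notifier callbacks below handle the
 * cases where the address space changes underneath the mapping: any
 * invalidation that intersects a mapped vma unmaps the overlapping grant
 * pages, clamped to the intersection of the two ranges.
 */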
static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		mstart = max(start, map->vma->vm_start);
		mend   = min(end,   map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			 map->index, map->count,
			 map->vma->vm_start, map->vma->vm_end,
			 start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}
static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
			 map->index, map->count,
			 map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};
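/*
 * Each open of the device gets its own gntdev_priv: an empty maps list,
 * and (under use_ptemod) the notifier above registered against the
 * opener's mm so invalidations can be routed back to this instance.
 */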
	/* in gntdev_open(): per-open state initialisation */
	INIT_LIST_HEAD(&priv->maps);
	/* ... */
	priv->mn.ops = &gntdev_mmu_ops;
static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(map);
	}
	/* ... unregister the mmu notifier, free priv ... */
	return 0;
}
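/*
 * The ioctl handlers below implement the userspace contract:
 * MAP_GRANT_REF allocates a map and returns the pseudo-file offset
 * ("index") at which to mmap() it, UNMAP_GRANT_REF drops it,
 * GET_OFFSET_FOR_VADDR translates a mapped address back to that offset,
 * and SET_UNMAP_NOTIFY arms an unmap-time notification.
 */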
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	/* ... */
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return -ENOMEM;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(map);
		return -ENOMEM;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(map);
		return -EFAULT;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);
	/* ... copy op (with the new index) back to userspace ... */
	return 0;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		err = 0;
	}
	spin_unlock(&priv->lock);
	if (map)
		gntdev_put_map(map);
	return err;
}
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv,
		 (unsigned long)op.vaddr);

	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		return -EFAULT;
	/* ... report the map's index and count back to userspace ... */
	return 0;
}
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;
	/* ... validate op.action, take a reference on the event channel ... */

	out_flags = op.action;
	out_event = op.event_channel_port;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	/* ... */
	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;
	rc = 0;

 unlock_out:
	spin_unlock(&priv->lock);
	/* ... drop the event-channel reference we did not store ... */
	return rc;
}
static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);
	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);
	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}
}
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
		 index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}
	vma->vm_ops = &gntdev_vmops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = map;
	/* ... set VM_DONTCOPY and map->vma under use_ptemod ... */
	spin_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err)
			goto out_put_map;
	}
	err = map_grant_pages(map);
	if (err)
		goto out_put_map;
	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
					     map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}
	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;
out_put_map:
	/* ... undo partial setup, drop the map reference ... */
	gntdev_put_map(map);
	return err;
}
static const struct file_operations gntdev_fops = {
	/* ... */
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/gntdev",
	.fops  = &gntdev_fops,
};
static int __init gntdev_init(void)
{
	/* ... bail out on non-Xen kernels, set use_ptemod on PV,
	 * register the misc device ... */
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}
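/*
 * For orientation, a minimal userspace sketch (not part of this driver) of
 * the calling convention implemented above: open /dev/xen/gntdev, reserve
 * an index with IOCTL_GNTDEV_MAP_GRANT_REF, mmap() at that index, and undo
 * both on the way out. The helpers map_one_grant()/unmap_one_grant() are
 * illustrative names only; remote_domid/gref stand in for values obtained
 * out of band from the granting domain, and 4096 assumes 4 KiB pages.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntdev.h>

static void *map_one_grant(int fd, uint32_t remote_domid, uint32_t gref,
			   uint64_t *index)
{
	struct ioctl_gntdev_map_grant_ref op = {
		.count = 1,
		.refs[0] = { .domid = remote_domid, .ref = gref },
	};

	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) < 0)
		return MAP_FAILED;
	*index = op.index;
	/* The mmap offset is the index handed back by the ioctl. */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
}

static void unmap_one_grant(int fd, void *addr, uint64_t index)
{
	struct ioctl_gntdev_unmap_grant_ref op = {
		.index = index,
		.count = 1,
	};

	munmap(addr, 4096);	/* triggers gntdev_vma_close() */
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &op);
}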