28 #include <linux/device.h>
31 #include <linux/module.h>
33 #include <linux/pci.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
37 #include <linux/vfio.h>
/*
 * NOTE(review): this listing is an elided excerpt — original file line
 * numbers (e.g. "40") are fused into the text and most intervening lines
 * are missing.  Code bytes are left untouched; only comments are added.
 */
/* Module identity strings — presumably fed to MODULE_VERSION()/
 * MODULE_DESCRIPTION() macros elided from this listing. */
40 #define DRIVER_VERSION "0.2"
42 #define DRIVER_DESC "Type1 IOMMU driver for VFIO"
/* Opt-in escape hatch for platforms without interrupt remapping; tested
 * in the open path below (orig. line 627). */
44 static bool allow_unsafe_interrupts;
/* Fragment of a MODULE_PARM_DESC() invocation — the macro line itself is
 * elided.  NOTE(review): "for on platforms" is a typo in the user-visible
 * string (should be "on platforms"); left as-is since this is runtime text. */
48 "Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
/* Page count -> byte size; the cast happens before the shift so a large
 * npage cannot overflow a narrower intermediate type. */
76 #define NPAGE_TO_SIZE(npage) ((size_t)(npage) << PAGE_SHIFT)
/*
 * vfio_lock_acct() — charge @npage pages against the calling task's
 * locked-memory accounting.  Callers pass a negative npage to credit
 * pages back (see orig. line 200).  Fragment: locking and the
 * can't-sleep fallback path between the visible lines are elided.
 */
98 static void vfio_lock_acct(
long npage)
107 current->mm->locked_vm += npage;
/* NOTE(review): the two lines below (orig. line 127) belong to a
 * different, elided function — presumably a deferred-accounting work
 * item that stashes npage in a work struct ("vwork"); confirm against
 * the full source. */
127 vwork->
npage = npage;
/*
 * is_invalid_reserved_pfn() — decide whether @pfn refers to a page that
 * needs no pinning/accounting (invalid or reserved).  Fragment: the
 * pfn_valid() check and compound-page handling between the visible
 * lines are elided; "head"/"tail" are presumably struct page pointers
 * derived from pfn — TODO confirm against the full source.
 */
136 static bool is_invalid_reserved_pfn(
unsigned long pfn)
142 reserved = !!(PageReserved(head));
158 return PageReserved(tail);
/*
 * put_pfn() — release the reference taken when @pfn was pinned.  Only
 * pages that actually required pinning (i.e. not invalid/reserved) are
 * released; the put_page()/set_page_dirty details are elided.  Appears
 * to return the number of pages unpinned (0 or 1) — inferred from
 * callers summing the result into an "unlocked" count.
 */
164 static int put_pfn(
unsigned long pfn,
int prot)
166 if (!is_invalid_reserved_pfn(pfn)) {
/*
 * Fragment (orig. lines 178-188): interior of what appears to be
 * __vfio_dma_do_unmap() — walks the IOVA range one page at a time,
 * unmapping each page and summing how many pinned pages were released
 * via put_pfn().  The function header line and the iommu_unmap() call
 * are elided.
 */
178 long npage,
int prot)
180 long i, unlocked = 0;
182 for (i = 0; i < npage; i++, iova +=
PAGE_SIZE) {
188 unlocked += put_pfn(pfn, prot);
/*
 * vfio_dma_unmap() — unmap a pinned IOVA range and credit the released
 * pages back to the task's locked-memory accounting (note the negative
 * delta passed to vfio_lock_acct()).  Fragment: the function header
 * line is elided.
 */
195 long npage,
int prot)
199 unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
200 vfio_lock_acct(-unlocked);
/*
 * vaddr_get_pfn() — resolve a userspace virtual address to a pfn.
 * Visible logic: a VMA-lookup fallback for VM_PFNMAP mappings (e.g.
 * MMIO regions), where the pfn is presumably computed from vm_pgoff
 * (elided); such pfns are accepted only if is_invalid_reserved_pfn()
 * says they need no pinning.  The normal get_user_pages path is elided.
 */
203 static int vaddr_get_pfn(
unsigned long vaddr,
int prot,
unsigned long *pfn)
216 vma = find_vma_intersection(
current->mm, vaddr, vaddr + 1);
218 if (vma && vma->
vm_flags & VM_PFNMAP) {
220 if (is_invalid_reserved_pfn(*pfn))
/*
 * Fragment (orig. lines 231-288): interior of what appears to be
 * __vfio_dma_map() — pins each user page in the range and maps it at
 * the corresponding IOVA.  On any failure, the pages/mappings
 * established so far are torn down via __vfio_dma_do_unmap() before
 * returning (visible at orig. 263 and 284).  Only pages that actually
 * required pinning (not invalid/reserved pfns, orig. 275) are counted
 * toward the "locked" total charged via vfio_lock_acct().
 */
231 unsigned long vaddr,
long npage,
int prot)
238 for (i = 0; i < npage; i++, iova +=
PAGE_SIZE)
259 unsigned long pfn = 0;
261 ret = vaddr_get_pfn(vaddr, prot, &pfn);
263 __vfio_dma_do_unmap(iommu, start, i, prot);
275 if (!is_invalid_reserved_pfn(pfn))
284 __vfio_dma_do_unmap(iommu, start, i, prot);
288 vfio_lock_acct(locked);
292 static inline bool ranges_overlap(
dma_addr_t start1,
size_t size1,
296 return (start2 - start1 < size1);
297 else if (start2 < start1)
298 return (start1 - start2 < size2);
299 return (size1 > 0 && size2 > 0);
/*
 * Fragment (orig. lines 319-369): interior of what appears to be
 * vfio_remove_dma_overlap() — trims or splits an existing mapping
 * ("dma") so the range [start, start + size) is removed.  Visible cases:
 *   - range covers the front of the mapping (start <= dma->iova):
 *     unmap npage_lo pages at dma->iova, then advance iova/vaddr by the
 *     overlap and shrink npage;
 *   - range covers the tail: unmap npage_hi pages starting at "start"
 *     and shrink npage;
 *   - range is interior: the mapping is split in two ("split"), keeping
 *     npage_lo pages in the original and npage_hi in the new entry.
 * The list/allocation bookkeeping between these lines is elided.
 */
319 long npage_lo, npage_hi;
/* Fully-covered case (condition continues on an elided line). */
322 if (start <= dma->iova &&
326 npage_lo = dma->
npage;
/* Front-overlap case. */
332 if (start <= dma->iova) {
335 overlap = start + size - dma->
iova;
338 vfio_dma_unmap(iommu, dma->
iova, npage_lo, dma->
prot);
339 dma->
iova += overlap;
340 dma->
vaddr += overlap;
341 dma->
npage -= npage_lo;
/* Tail-overlap case. */
352 vfio_dma_unmap(iommu, start, npage_hi, dma->
prot);
353 dma->
npage -= npage_hi;
/* Interior case: split into "dma" (low part) and "split" (high part). */
367 dma->
npage = npage_lo;
369 split->
npage = npage_hi;
/*
 * vfio_dma_do_unmap() — backend for the VFIO_IOMMU_UNMAP_DMA ioctl.
 * Visible logic: reject an iova or size that is not aligned to the
 * minimum IOMMU page size ("mask", computed on an elided line), then
 * repeatedly remove overlapping mappings via vfio_remove_dma_overlap()
 * until nothing remains (npage == 0) or an error occurs.  Returns 0 on
 * success, a negative errno on failure.
 */
377 static int vfio_dma_do_unmap(
struct vfio_iommu *iommu,
386 if (unmap->
iova & mask)
388 if (unmap->
size & mask)
399 ret = vfio_remove_dma_overlap(iommu, unmap->
iova,
403 if (ret < 0 || npage == 0)
408 return ret > 0 ? 0 : (
int)ret;
/*
 * vfio_dma_do_map() — backend for the VFIO_IOMMU_MAP_DMA ioctl.
 * Visible logic: reject iova/vaddr ranges that wrap the address space
 * (iova + size < iova), reject overlap with an existing mapping
 * (vfio_find_dma), warn and fail when RLIMIT_MEMLOCK would be exceeded,
 * then pin and map with __vfio_dma_map().  Afterwards it attempts to
 * merge the new range with adjacent mappings (one byte below at
 * iova - 1, and at iova + size above) that share the same prot and have
 * contiguous vaddr; on a later failure the range is unmapped again with
 * vfio_dma_unmap().  Most bookkeeping between these lines is elided.
 */
411 static int vfio_dma_do_map(
struct vfio_iommu *iommu,
416 unsigned long locked, lock_limit, vaddr = map->
vaddr;
417 size_t size = map->
size;
418 int ret = 0, prot = 0;
/* Overflow/wrap checks; "iova + size" == 0 means the range ends exactly
 * at the top of the address space, which is allowed. */
444 if (iova + size && iova + size < iova)
448 if (vaddr + size && vaddr + size < vaddr)
457 if (vfio_find_dma(iommu, iova, size)) {
466 pr_warn(
"%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
472 ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
/* Try to merge with the mapping immediately below the new range. */
478 dma = vfio_find_dma(iommu, iova - 1, 1);
479 if (dma && dma->
prot == prot &&
/* Try to merge with the mapping immediately above the new range. */
494 dma = vfio_find_dma(iommu, iova + size, 1);
495 if (dma && dma->
prot == prot &&
496 dma->
vaddr == vaddr + size) {
/* Error path: tear the new mapping back down. */
519 vfio_dma_unmap(iommu, iova, npage, prot);
/*
 * vfio_iommu_type1_attach_group() — .attach_group callback of the
 * driver ops table (orig. line 730).  The entire body is elided from
 * this listing.
 */
535 static int vfio_iommu_type1_attach_group(
void *iommu_data,
/*
 * vfio_iommu_type1_detach_group() — .detach_group callback of the
 * driver ops table (orig. line 731).  The entire body is elided from
 * this listing.
 */
577 static void vfio_iommu_type1_detach_group(
void *iommu_data,
578 struct iommu_group *iommu_group)
/*
 * vfio_iommu_type1_open() — .open callback; allocates/initializes the
 * per-container iommu context (elided).  Visible: an -EIO failure path
 * (cause elided — presumably domain allocation failure; confirm against
 * the full source), and a refusal to proceed without interrupt
 * remapping unless the allow_unsafe_interrupts module parameter is set,
 * returning -EPERM.
 */
597 static void *vfio_iommu_type1_open(
unsigned long arg)
618 return ERR_PTR(-
EIO);
627 if (!allow_unsafe_interrupts &&
629 pr_warn(
"%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
633 return ERR_PTR(-
EPERM);
/*
 * vfio_iommu_type1_release() — .release callback; tears down the iommu
 * context created by vfio_iommu_type1_open().  The entire body is
 * elided from this listing.
 */
639 static void vfio_iommu_type1_release(
void *iommu_data)
/*
 * vfio_iommu_type1_ioctl() — ioctl dispatcher for the container.
 * Visible: the GET_INFO path validates the caller-supplied argsz
 * against the minimum structure size and reports the IOMMU domain's
 * supported page sizes (pgsize_bitmap); the MAP_DMA and UNMAP_DMA paths
 * forward to vfio_dma_do_map() / vfio_dma_do_unmap().  The switch on
 * cmd and the copy_from_user/copy_to_user handling are elided.
 */
662 static long vfio_iommu_type1_ioctl(
void *iommu_data,
663 unsigned int cmd,
unsigned long arg)
683 if (
info.argsz < minsz)
688 info.iova_pgsizes = iommu->
domain->ops->pgsize_bitmap;
705 return vfio_dma_do_map(iommu, &map);
718 return vfio_dma_do_unmap(iommu, &unmap);
/*
 * Driver callback table registered with the VFIO core.  Fragment: the
 * struct declaration line and the .owner initializer are elided; only
 * the named callbacks are visible.
 */
725 .name =
"vfio-iommu-type1",
727 .open = vfio_iommu_type1_open,
728 .release = vfio_iommu_type1_release,
729 .ioctl = vfio_iommu_type1_ioctl,
730 .attach_group = vfio_iommu_type1_attach_group,
731 .detach_group = vfio_iommu_type1_detach_group,
/*
 * Module init — presumably registers the ops table above with the VFIO
 * core (body elided from this listing).
 */
734 static int __init vfio_iommu_type1_init(
void)
/*
 * Module exit — presumably unregisters the driver from the VFIO core
 * (body elided from this listing).
 */
742 static void __exit vfio_iommu_type1_cleanup(
void)