26 #include <linux/list.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/stat.h>
/* Opt-in knob: permit device assignment even when the platform lacks
 * interrupt remapping (unsafe; see the parameter description below). */
35 static bool allow_unsafe_assigned_interrupts;
/* NOTE(review): fragment — the module_param()/MODULE_PARM_DESC() lines that
 * open this description string are not visible in this chunk. */
39 "Enable device assignment on platforms without interrupt remapping support.");
/* Forward declaration; the definition appears later in this file. */
41 static int kvm_iommu_unmap_memslots(
struct kvm *
kvm);
/* Forward declaration; the definition appears later in this file. */
42 static void kvm_iommu_put_pages(
struct kvm *
kvm,
43 gfn_t base_gfn,
unsigned long npages);
/* NOTE(review): isolated fragment (original line 55) — the enclosing
 * function is not visible in this chunk; this tests a pfn for the
 * error sentinel. */
55 if (is_error_pfn(pfn))
/* NOTE(review): fragments of an IOMMU mapping loop (original lines 77-131);
 * the enclosing function header and most of the body are missing here. */
77 end_gfn = gfn + slot->
npages;
84 while (gfn < end_gfn) {
/* Shrink page_size so the mapping does not run past the end of the slot.
 * NOTE(review): the statement that reduces page_size is not visible. */
97 while ((gfn + (page_size >>
PAGE_SHIFT)) > end_gfn)
/* Pin the guest pages before handing them to the IOMMU. */
108 pfn = kvm_pin_pages(slot, gfn, page_size);
109 if (is_error_pfn(pfn)) {
115 r =
iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
119 "iommu failed to map pfn=%llx\n", pfn);
/* Error path: tear down whatever was already mapped for this slot
 * (from base_gfn up to, but not including, the failing gfn). */
131 kvm_iommu_put_pages(kvm, slot->
base_gfn, gfn);
/* Establish IOMMU mappings for the guest's memslots.
 * NOTE(review): only fragments are visible — the memslot iteration
 * (original lines 142-148) is missing from this chunk. */
135 static int kvm_iommu_map_memslots(
struct kvm *
kvm)
/* Hold SRCU while walking the memslot array. */
141 idx = srcu_read_lock(&kvm->
srcu);
149 srcu_read_unlock(&kvm->
srcu, idx);
/* NOTE(review): fragments of a device-assignment path (original lines
 * 165-203); the enclosing function header is not visible in this chunk. */
165 pdev = assigned_dev->
dev;
/* Snapshot the flags so a change can be detected after attaching. */
179 last_flags = kvm->
arch.iommu_flags;
/* If iommu_flags changed (XOR against the snapshot), rebuild all memslot
 * mappings. NOTE(review): the constant compared against (original line
 * 186) is not visible here — confirm upstream. */
185 if ((last_flags ^ kvm->
arch.iommu_flags) ==
187 kvm_iommu_unmap_memslots(kvm);
188 r = kvm_iommu_map_memslots(kvm);
203 kvm_iommu_unmap_memslots(kvm);
/* NOTE(review): isolated fragment (original line 217); the enclosing
 * function is not visible in this chunk. */
217 pdev = assigned_dev->
dev;
/* NOTE(review): fragments of the guest IOMMU setup path (original lines
 * 246-266); the enclosing function header is not visible in this chunk. */
246 if (!kvm->
arch.iommu_domain) {
/* Refuse device assignment without interrupt remapping unless the admin
 * explicitly opted in via allow_unsafe_assigned_interrupts (see top of
 * file). Fixed typo in the user-visible message: "Re-enble" -> "Re-enable". */
251 if (!allow_unsafe_assigned_interrupts &&
255 " disallowing device assignment."
256 " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
257 " module option.\n", __func__);
/* Map all memslots into the new domain; roll back on failure. */
264 r = kvm_iommu_map_memslots(kvm);
266 kvm_iommu_unmap_memslots(kvm);
/* Release a run of @npages previously pinned pages starting at @pfn.
 * NOTE(review): the loop body (original line 278, presumably a
 * kvm_release_pfn_* call) is missing from this chunk — confirm upstream. */
273 static void kvm_unpin_pages(
struct kvm *kvm,
pfn_t pfn,
unsigned long npages)
277 for (i = 0; i < npages; ++
i)
/* Unmap @npages of guest pages starting at @base_gfn from the IOMMU
 * domain and unpin them.
 * NOTE(review): only fragments are visible — the per-iteration lookup and
 * unmap steps (original lines 291-315) are missing from this chunk. */
281 static void kvm_iommu_put_pages(
struct kvm *kvm,
282 gfn_t base_gfn,
unsigned long npages)
289 domain = kvm->
arch.iommu_domain;
290 end_gfn = base_gfn + npages;
297 while (gfn < end_gfn) {
298 unsigned long unmap_pages;
/* Unpin the pages that were just removed from the domain. */
316 kvm_unpin_pages(kvm, pfn, unmap_pages);
/* Remove IOMMU mappings for the guest's memslots.
 * NOTE(review): the per-slot loop (original lines 334-338) is missing
 * from this chunk; only the SRCU bracketing is visible. */
327 static int kvm_iommu_unmap_memslots(
struct kvm *kvm)
333 idx = srcu_read_lock(&kvm->
srcu);
339 srcu_read_unlock(&kvm->srcu, idx);
/* NOTE(review): fragment of the guest IOMMU teardown (original lines
 * 353-354); the enclosing function header is not visible in this chunk. */
353 kvm_iommu_unmap_memslots(kvm);
/* Clear the pointer so later callers see no active IOMMU domain. */
354 kvm->arch.iommu_domain =
NULL;