#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
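/*
 * MAX_DEVICES covers the full 16-bit PCI device id space (bus << 8 | devfn),
 * so the device-state table can be indexed directly by device id.
 * PRI_QUEUE_SIZE is 512 because the PRI tag extracted in ppr_notifier()
 * is 9 bits wide.
 */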
	struct mmu_notifier mn;
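/*
 * Page table with no valid entries. It is installed as the PASID's GCR3
 * root while a range invalidation is in flight, between
 * invalidate_range_start() and invalidate_range_end(), so the device
 * cannot use stale translations in the meantime.
 */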
static u64 *empty_page_table;
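/* Fold the PCI bus number and devfn into the 16-bit IOMMU device id */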
	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;
	dev_state = state_table[devid];
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);	/* take a reference */
	spin_unlock_irqrestore(&state_lock, flags);
static void free_device_state(struct device_state *dev_state)

static void put_device_state(struct device_state *dev_state)

static void put_device_state_wait(struct device_state *dev_state)
	free_device_state(dev_state);
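/*
 * put_device_state_wait() drops the caller's reference and, if other
 * references remain, sleeps on dev_state->wq until the count reaches zero
 * before it finally calls free_device_state().
 */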
	.notifier_call = task_exit,
	spin_unlock(&ps_lock);

	__unlink_pasid_state(pasid_state);
	spin_unlock(&ps_lock);
						  int pasid, bool alloc)

		index = (pasid >> (9 * level)) & 0x1ff;
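/*
 * The per-device PASID table is a small radix tree: each level consumes
 * 9 bits of the PASID and indexes a 512-entry page, just like a CPU page
 * table. With pasid_levels == 1, for example, PASID 0x3ff03 walks through
 * slot 0x1ff of the root table and ends at slot 0x103 of the leaf.
 */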
static int set_pasid_state(struct device_state *dev_state,

	struct pasid_state **ptr;

	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	spin_unlock_irqrestore(&dev_state->lock, flags);
static void clear_pasid_state(struct device_state *dev_state, int pasid)

	struct pasid_state **ptr;

	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	spin_unlock_irqrestore(&dev_state->lock, flags);
static struct pasid_state *get_pasid_state(struct device_state *dev_state,

	struct pasid_state **ptr, *ret = NULL;

	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	spin_unlock_irqrestore(&dev_state->lock, flags);
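/*
 * Note the alloc flag: set_pasid_state() and clear_pasid_state() pass true
 * so intermediate table pages are allocated during the walk, while the
 * lookup in get_pasid_state() passes false and simply fails on a missing
 * entry.
 */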
static void free_pasid_state(struct pasid_state *pasid_state)

static void put_pasid_state(struct pasid_state *pasid_state)

static void put_pasid_state_wait(struct pasid_state *pasid_state)
	free_pasid_state(pasid_state);
static void __unbind_pasid(struct pasid_state *pasid_state)
	put_pasid_state(pasid_state);
static void unbind_pasid(struct device_state *dev_state, int pasid)
	struct pasid_state *pasid_state;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		return;

	unlink_pasid_state(pasid_state);
	__unbind_pasid(pasid_state);
	put_pasid_state_wait(pasid_state);
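/*
 * Teardown order matters here: the pasid_state is first unlinked from the
 * global list, then __unbind_pasid() tears down the IOMMU context, and
 * finally put_pasid_state_wait() blocks until all outstanding references
 * (e.g. from queued fault work) are gone.
 */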
static void free_pasid_states_level1(struct pasid_state **tbl)

	for (i = 0; i < 512; ++i) {
static void free_pasid_states_level2(struct pasid_state **tbl)
	struct pasid_state **ptr;

	for (i = 0; i < 512; ++i) {

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
static void free_pasid_states(struct device_state *dev_state)
	struct pasid_state *pasid_state;

		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);
		unbind_pasid(dev_state, i);

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
static void __mn_flush_page(struct mmu_notifier *mn,
	struct pasid_state *pasid_state;

	pasid_state = mn_to_state(mn);
static int mn_clear_flush_young(struct mmu_notifier *mn,
	__mn_flush_page(mn, address);

static void mn_change_pte(struct mmu_notifier *mn,
	__mn_flush_page(mn, address);

static void mn_invalidate_page(struct mmu_notifier *mn,
	__mn_flush_page(mn, address);
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      unsigned long start,
				      unsigned long end)
	struct pasid_state *pasid_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
				  __pa(empty_page_table));
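/*
 * While the CPU side invalidates a range, the PASID's GCR3 pointer is
 * switched to empty_page_table so device accesses fault instead of hitting
 * stale translations; invalidate_range_end() below restores the real
 * page-table root (mm->pgd).
 */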
static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    unsigned long start,
				    unsigned long end)
	struct pasid_state *pasid_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
				  __pa(pasid_state->mm->pgd));
static struct mmu_notifier_ops iommu_mn = {
	.clear_flush_young	= mn_clear_flush_young,
	.change_pte		= mn_change_pte,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};
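/*
 * These hooks keep the device's view of the address space coherent with
 * the CPU page tables: the single-page callbacks flush the IOTLB entry for
 * that address, the range callbacks temporarily swap in the empty page
 * table as described above.
 */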
static void set_pri_tag_status(struct pasid_state *pasid_state,

	spin_unlock_irqrestore(&pasid_state->lock, flags);
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,

	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
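/*
 * A PPR response for a tag is sent exactly once: only when the last
 * in-flight fault for that tag completes (inflight drops to zero) and the
 * hardware asked for a response (finish was latched in ppr_notifier()).
 */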
	} else if (fault->dev_state->inv_ppr_cb) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;

	put_pasid_state(fault->state);
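/*
 * When the core code cannot resolve the fault itself, the device driver's
 * inv_ppr_cb decides the outcome; its AMD_IOMMU_INV_PRI_RSP_* return code
 * is translated into the PPR_* status that is sent back to the device.
 */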
	struct pasid_state *pasid_state;

	tag    = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;
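/*
 * The hardware tag field encodes the 9-bit PRI tag in bits 0-8 (hence
 * PRI_QUEUE_SIZE == 512); bit 9 marks a request that expects a response,
 * which is latched in pri[tag].finish and answered later by
 * finish_pri_tag().
 */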
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;
	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
		finish_pri_tag(dev_state, pasid_state, tag);

	fault->state = pasid_state;

	put_device_state(dev_state);
	.notifier_call = ppr_notifier,
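/*
 * There is no mm-release hook available to the driver here, so it listens
 * for PROFILE_TASK_EXIT through the task_exit() notifier below and unbinds
 * every PASID still attached to the exiting task.
 */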
static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
	struct pasid_state *pasid_state;

		if (pasid_state->task != task)
			continue;

		/* Drop the lock before unbinding */
		spin_unlock(&ps_lock);

		pasid = pasid_state->pasid;

		unbind_pasid(dev_state, pasid);

	spin_unlock(&ps_lock);
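/*
 * The bind path below ties a task's address space to a PASID: it allocates
 * a pasid_state, registers the mmu_notifier on the task's mm, installs the
 * state in the device's PASID table and points the IOMMU's GCR3 for that
 * PASID at the mm's page-table root.
 */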
	struct pasid_state *pasid_state;

	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return -EINVAL;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;
	pasid_state->pasid  = pasid;
	pasid_state->mn.ops = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;
	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	link_pasid_state(pasid_state);
out_clear_state:
	clear_pasid_state(dev_state, pasid);
out_free:
	free_pasid_state(pasid_state);
out:
	put_device_state(dev_state);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	unbind_pasid(dev_state, pasid);

out:
	put_device_state(dev_state);
	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
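/*
 * Sizing the PASID table: each level past the leaf resolves another 9 bits,
 * so up to 512 PASIDs fit in a single table (pasid_levels == 0), while e.g.
 * pasids == 0x10000 yields pasid_levels == 1, one root level above the
 * 512-entry leaf tables.
 */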
		goto out_free_dev_state;

		goto out_free_states;

		goto out_free_domain;

		goto out_free_domain;

	if (state_table[devid] != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		goto out_free_domain;
	}

	state_table[devid] = dev_state;

	spin_unlock_irqrestore(&state_lock, flags);
	dev_state = state_table[devid];
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	state_table[devid] = NULL;

	spin_unlock_irqrestore(&state_lock, flags);

	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
				 amd_iommu_invalid_ppr_cb cb)

	dev_state = state_table[devid];
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);
				    amd_iommu_invalidate_ctx cb)

	dev_state = state_table[devid];
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);
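/*
 * Typical use of the exported API by a device driver; a sketch with
 * hypothetical my_pdev/my_task/my_ppr_cb values, not taken from this file:
 *
 *	amd_iommu_init_device(my_pdev, pasids);
 *	amd_iommu_set_invalid_ppr_cb(my_pdev, my_ppr_cb);
 *	amd_iommu_bind_pasid(my_pdev, pasid, my_task);
 *	...
 *	amd_iommu_unbind_pasid(my_pdev, pasid);
 *	amd_iommu_free_device(my_pdev);
 */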
static int __init amd_iommu_v2_init(void)
	size_t state_table_size;

	pr_info("AMD IOMMUv2 functionality not available on this system\n");

	if (state_table == NULL)
		return -ENOMEM;

	if (iommu_wq == NULL)
		goto out_free;

	if (empty_page_table == NULL)
		goto out_destroy_wq;
static void __exit amd_iommu_v2_exit(void)
	size_t state_table_size;

		dev_state = get_device_state(i);
		if (dev_state == NULL)
			continue;

		put_device_state(dev_state);

	state_table_size = MAX_DEVICES * sizeof(struct device_state *);

	free_page((unsigned long)empty_page_table);