#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
/* sysfs kset, plus the ID allocator and lock used to number IOMMU groups */
static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)
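/*
 * Illustrative sketch (not part of the original excerpt): the macro above
 * is how group attributes such as the "name" file used further below get
 * declared; a read-only attribute only needs a show callback, e.g.:
 *
 *	static ssize_t iommu_group_show_name(struct iommu_group *group,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%s\n", group->name);
 *	}
 *
 *	static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 *
 * This expands to a variable named iommu_group_attr_name, which is the
 * identifier passed to iommu_group_create_file()/iommu_group_remove_file()
 * later in this excerpt.
 */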
	/* in iommu_group_attr_show(): forward to the attribute's handler */
	ret = attr->show(group, buf);

	/* in iommu_group_attr_store() */
	ret = attr->store(group, buf, count);
static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};
static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}
static void iommu_group_release(struct kobject *kobj)
{
	/* ... releases the iommu_data, the group ID and the name ... */
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
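/*
 * Illustrative sketch (not in the excerpt): the group allocator ties each
 * new group's kobject to the ktype above and adds it to the kset, which
 * is what makes groups appear under /sys/kernel/iommu_groups, roughly:
 *
 *	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
 *				   NULL, "%d", group->id);
 */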
	/* in iommu_group_alloc() */
	group->kobj.kset = iommu_group_kset;
	INIT_LIST_HEAD(&group->devices);
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
	/* in iommu_group_set_name(): replace any existing "name" file */
	iommu_group_remove_file(group, &iommu_group_attr_name);
	/* ... */
	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	/* in iommu_group_add_device() */
	device = kzalloc(sizeof(*device), GFP_KERNEL);
	/* ... */

	/* on a sysfs link-name collision, append a counter and retry */
	if (ret == -EEXIST && i >= 0) {
		device->name = kasprintf(GFP_KERNEL, "%s.%d",
					 kobject_name(&dev->kobj), i++);
		/* ... */
	}
	/* in iommu_group_add_device(): notify any listeners of the addition */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	/* in iommu_group_remove_device() */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
	/* ... then unlink the matching entry from group->devices */
	if (tmp_device->dev == dev) {
		/* ... */
	}
	/* in iommu_group_for_each_dev(): apply fn to each device in the group */
	ret = fn(device->dev, data);
static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;

	if (!ops->add_device)
		return -ENODEV;

	ops->add_device(dev);

	return 0;
}
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	unsigned long group_action = 0;

	/* ADD/DEL call into the iommu driver's ops, if provided */
	if (action == BUS_NOTIFY_ADD_DEVICE && ops->add_device)
		return ops->add_device(dev);
	if (action == BUS_NOTIFY_DEL_DEVICE && ops->remove_device)
		ops->remove_device(dev);

	/* remaining driver bind/unbind events are republished to the group */
	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}
	/* ... */
}
static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};
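/*
 * Illustrative sketch (not in the excerpt): a consumer can listen for the
 * republished bind/unbind events by registering a notifier on the group;
 * my_notify and my_nb are hypothetical names:
 *
 *	static int my_notify(struct notifier_block *nb,
 *			     unsigned long action, void *data)
 *	{
 *		if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *			pr_info("a group device bound to a driver\n");
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */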
	/* in bus_set_iommu(): do the IOMMU-specific setup for this bus type */
	iommu_bus_init(bus, ops);
	/* in iommu_domain_alloc() */
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	/* ... */
	ret = domain->ops->domain_init(domain);
	/* in iommu_domain_free() */
	domain->ops->domain_destroy(domain);

	/* in iommu_attach_device() */
	return domain->ops->attach_dev(domain, dev);

	/* in iommu_detach_device() */
	domain->ops->detach_dev(domain, dev);
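/*
 * Illustrative sketch (not in the excerpt) of a domain's life cycle using
 * the wrappers above; "bus" and "dev" stand in for a caller's bus type
 * and device:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(bus);
 *
 *	if (domain && !iommu_attach_device(domain, dev)) {
 *		... use iommu_map()/iommu_unmap() on the domain ...
 *		iommu_detach_device(domain, dev);
 *	}
 *	if (domain)
 *		iommu_domain_free(domain);
 */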
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	return iommu_group_for_each_dev(group, domain,
					iommu_group_do_attach_device);
}

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	iommu_detach_device(domain, dev);

	return 0;
}
	/* in iommu_iova_to_phys() */
	return domain->ops->iova_to_phys(domain, iova);

	/* in iommu_domain_has_cap() */
	return domain->ops->domain_has_cap(domain, cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)paddr,
		       (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* pick the biggest page still in the mask */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll the mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
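/*
 * Worked example (illustrative, not in the excerpt): with a driver whose
 * ops->pgsize_bitmap is SZ_4K | SZ_2M, a call such as
 *
 *	iommu_map(domain, 0x1000, 0x2000, 0x3000, IOMMU_READ | IOMMU_WRITE);
 *
 * computes addr_merge = 0x1000 | 0x2000 = 0x3000, whose lowest set bit
 * (__ffs == 12) limits the page size to 4KiB, so the loop issues three
 * 4KiB ops->map() calls. A 2MiB-aligned, 2MiB-sized request would be
 * satisfied by a single 2MiB mapping instead.
 */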
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
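/*
 * Usage note (illustrative): a driver's ->unmap() may tear down a larger
 * or smaller chunk than requested per iteration, so callers should check
 * the returned byte count rather than assume the full range went away:
 *
 *	size_t n = iommu_unmap(domain, iova, size);
 *	if (n < size)
 *		pr_warn("only unmapped 0x%zx of 0x%zx bytes\n", n, size);
 */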
static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	/* ... */
	BUG_ON(!iommu_group_kset);

	return 0;
}
	/* in iommu_domain_get_attr(): fall back to the driver's handler */
	if (!domain->ops->domain_get_attr)
		return -EINVAL;

	ret = domain->ops->domain_get_attr(domain, attr, data);

	/* in iommu_domain_set_attr() */
	if (!domain->ops->domain_set_attr)
		return -EINVAL;

	return domain->ops->domain_set_attr(domain, attr, data);
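/*
 * Illustrative sketch (not in the excerpt): querying a domain attribute.
 * DOMAIN_ATTR_GEOMETRY is filled in generically from domain->geometry
 * before the driver fallback above is consulted:
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture 0x%llx-0x%llx\n",
 *			(unsigned long long)geo.aperture_start,
 *			(unsigned long long)geo.aperture_end);
 */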