#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
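/*
 * The Exynos System MMU uses a two-level page table. A first-level table of
 * 4096 entries covers the 32-bit I/O virtual address space in 1MiB sections;
 * a section entry may instead point to a second-level table of 256 entries,
 * each mapping a 64KiB large page or a 4KiB small page.
 */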
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
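/*
 * The low two bits of an entry encode its type. A first-level entry is a
 * fault (0 or 3), a pointer to a second-level table (1) or a 1MiB section
 * (2); a second-level entry is a fault (0), a 64KiB large page (1) or a
 * 4KiB small page (2 or 3).
 */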
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)
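/*
 * lv1ent_offset() indexes the first-level table with IOVA bits [31:20];
 * lv2ent_offset() indexes a second-level table with bits [19:12].
 */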
#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
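/* Construct page table entries: the physical address ORed with the type bits. */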
#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
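/*
 * Values written to REG_MMU_CTRL: bit 0 enables address translation and
 * bit 2 enables fault interrupts; CTRL_BLOCK additionally sets bit 1,
 * which stalls incoming transactions while the TLB or page table base
 * is being updated.
 */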
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
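/* Register offsets of the System MMU special function registers (SFRs). */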
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034
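/* Start/end address registers of the two prefetch buffers, PB0 and PB1. */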
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr);
150 "AR MULTI-HIT FAULT",
151 "AW MULTI-HIT FAULT",
153 "AR SECURITY PROTECTION FAULT",
154 "AR ACCESS PROTECTION FAULT",
155 "AW SECURITY PROTECTION FAULT",
156 "AW ACCESS PROTECTION FAULT",
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
static bool sysmmu_block(void __iomem *sfrbase)
		sysmmu_unblock(sfrbase);
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
					  unsigned long iova)
static void __sysmmu_set_ptbase(void __iomem *sfrbase, unsigned long pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
				 unsigned long size, int idx)
void exynos_sysmmu_set_prefbuf(struct device *dev,
			       unsigned long base0, unsigned long size0,
			       unsigned long base1, unsigned long size1)
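	/*
	 * Guard against wrap-around in either range, then program the two
	 * prefetch buffers; when only one region is supplied, base1/size1
	 * are derived below so that PB0 and PB1 each cover part of it.
	 */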
	BUG_ON((base0 + size0) <= base0);
	BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
	if (!is_sysmmu_active(data))
265 for (i = 0; i < data->
nsfrs; i++) {
267 if (!sysmmu_block(data->
sfrbases[i]))
			size0 = size0 - size1;
			base1 = base0 + size0;
		__sysmmu_set_prefbuf(data->sfrbases[i], base0, size0, 0);
		__sysmmu_set_prefbuf(data->sfrbases[i], base1, size1, 1);
	__set_fault_handler(data, handler);
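/*
 * Default fault handler: dumps the page table entries that translate the
 * faulting address and then triggers a kernel OOPS, since a System MMU
 * fault leaves no safe way to continue.
 */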
static int default_fault_handler(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr)
	pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
			sysmmu_fault_name[itype], fault_addr, pgtable_base);
	ent = section_entry(__va(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
	unsigned long addr = -1;

	WARN_ON(!is_sysmmu_active(data));
356 if (irqres && ((
int)irqres->
start == irq))
369 data->
sfrbases[i] + fault_reg_offset[itype]);
373 ret = report_iommu_fault(data->
domain, data->
dev,
388 data->
dbgname, sysmmu_fault_name[itype]);
	bool disabled = false;
	if (!set_sysmmu_inactive(data))
409 for (i = 0; i < data->
nsfrs; i++)
	if (!set_sysmmu_active(data)) {
		set_sysmmu_inactive(data);
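	/*
	 * Program each SFR instance with the new page table base and reset
	 * both prefetch buffers to cover the whole address space
	 * (base 0, size -1).
	 */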
465 for (i = 0; i < data->
nsfrs; i++) {
466 __sysmmu_set_ptbase(data->
sfrbases[i], pgtable);
472 __sysmmu_set_prefbuf(data->
sfrbases[i], 0, -1, 0);
473 __sysmmu_set_prefbuf(data->
sfrbases[i], 0, -1, 1);
495 ret = pm_runtime_get_sync(data->
sysmmu);
	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
503 pm_runtime_put(data->
sysmmu);
505 "(%s) Already enabled with page table %#lx\n",
	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
	if (is_sysmmu_active(data)) {
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate_entry(
						data->sfrbases[i], iova);
543 "(%s) Disabled. Skipping invalidating TLB.\n",
	if (is_sysmmu_active(data)) {
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate(data->sfrbases[i]);
567 "(%s) Disabled. Skipping invalidating TLB.\n",
		dev_dbg(dev, "Not enough memory\n");
		dev_dbg(dev, "Unable to initialize driver data\n");
		dev_dbg(dev, "Not enough memory\n");
604 for (i = 0; i < data->
nsfrs; i++) {
			dev_dbg(dev, "Unable to find IOMEM region\n");
			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
622 for (i = 0; i < data->
nsfrs; i++) {
			dev_dbg(dev, "Unable to find IRQ resource\n");
				dev_name(dev), data);
			dev_dbg(dev, "Unable to register interrupt handler\n");
	if (dev_get_platdata(dev)) {
		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
652 if (IS_ERR(data->
clk[0])) {
654 dev_dbg(dev,
"No clock descriptor registered\n");
657 if (data->
clk[0] && deli) {
660 if (IS_ERR(data->
clk[1]))
669 INIT_LIST_HEAD(&data->
node);
671 __set_fault_handler(data, &default_fault_handler);
686 while (data->
nsfrs-- > 0)
	dev_err(dev, "Failed to initialize\n");
	.probe = exynos_sysmmu_probe,
		.name = "exynos-sysmmu",
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}
static int exynos_iommu_domain_init(struct iommu_domain *domain)
733 INIT_LIST_HEAD(&priv->
clients);
735 domain->
geometry.aperture_start = 0;
737 domain->
geometry.force_aperture =
true;
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
765 spin_unlock_irqrestore(&priv->
lock, flags);
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
785 ret = pm_runtime_get_sync(data->
sysmmu);
	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
802 spin_unlock_irqrestore(&priv->
lock, flags);
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
				__func__, __pa(priv->pgtable));
		pm_runtime_put(data->sysmmu);
	} else if (ret > 0) {
		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
				__func__, __pa(priv->pgtable));
		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
				__func__, __pa(priv->pgtable));
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
				__func__, __pa(priv->pgtable));
		list_del_init(&data->node);
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed\n",
				__func__, __pa(priv->pgtable));
851 spin_unlock_irqrestore(&priv->
lock, flags);
854 pm_runtime_put(data->
sysmmu);
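/* Allocate and install a second-level page table on demand for @iova. */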
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
				     short *pgcounter)
		pgtable_flush(sent, sent + 1);

	return page_entry(sent, iova);
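/*
 * Install a 1MiB section mapping. Any second-level table that previously
 * hung off this entry is freed, and the updated entry is flushed to memory.
 */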
static int lv1set_section(unsigned long *sent, phys_addr_t paddr,
			  short *pgcnt)
		kfree(page_entry(sent, 0));

	pgtable_flush(sent, sent + 1);
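/*
 * Install a 4KiB small-page entry, or SPAGES_PER_LPAGE identical entries
 * for a 64KiB large page; a partially written large page is rolled back
 * if one of its entries turns out to be already in use.
 */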
static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
		pgtable_flush(pent, pent + 1);
				/* roll back the entries already written */
				memset(pent - i, 0, sizeof(*pent) * i);
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
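/*
 * Map @size bytes at @iova to @paddr. @size is exactly one of SECT_SIZE,
 * LPAGE_SIZE or SPAGE_SIZE; the generic IOMMU layer splits larger requests
 * according to pgsize_bitmap.
 */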
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
	unsigned long *entry;
937 entry = section_entry(priv->
pgtable, iova);
		ret = lv1set_section(entry, paddr,
		pent = alloc_lv2entry(entry, iova,
			ret = lv2set_page(pent, paddr, size,
		pr_debug("%s: Failed to map iova 0x%lx/0x%zx bytes\n",
				__func__, iova, size);
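/*
 * Clear the entry (or entries, for a large page) mapping @iova, then
 * invalidate the stale translation from the TLB of every attached
 * System MMU.
 */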
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
977 ent = section_entry(priv->
pgtable, iova);
		pgtable_flush(ent, ent + 1);
	ent = page_entry(ent, iova);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1018 spin_unlock_irqrestore(&priv->
pgtablelock, flags);
1022 sysmmu_tlb_invalidate_entry(data->dev, iova);
1023 spin_unlock_irqrestore(&priv->
lock, flags);
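/* Translate @iova to a physical address by walking the page table in software. */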
	unsigned long *entry;
	unsigned long flags;
1039 entry = section_entry(priv->
pgtable, iova);
		entry = page_entry(entry, iova);
1052 spin_unlock_irqrestore(&priv->
pgtablelock, flags);
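/* Callbacks exported to the generic IOMMU layer. */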
static struct iommu_ops exynos_iommu_ops = {
	.domain_init = exynos_iommu_domain_init,
	.domain_destroy = exynos_iommu_domain_destroy,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
static int __init exynos_iommu_init(void)