msm_iommu.c (Linux kernel 3.7.1)
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"     \
: "=r" (reg))

#define RCP15_PRRR(reg)         MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)         MRC(reg, p15, 0, c10, c2, 1)

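/*
 * PRRR (Primary Region Remap Register) and NMRR (Normal Memory Remap
 * Register) are the ARMv7 TEX-remap registers. The CPU's values are read
 * through the macros above and mirrored into each IOMMU context (see
 * __program_context) so that IOMMU mappings use the same memory-type
 * encodings as the CPU's page tables.
 */
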
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
        unsigned long *pgtable;
        struct list_head list_attached;
};

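/*
 * The IOMMU's registers may only be accessed while its interface clocks
 * are running; pclk is mandatory, while clk is optional and is only
 * toggled when present.
 */
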
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
        int ret;

        ret = clk_enable(drvdata->pclk);
        if (ret)
                goto fail;

        if (drvdata->clk) {
                ret = clk_enable(drvdata->clk);
                if (ret)
                        clk_disable(drvdata->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
        if (drvdata->clk)
                clk_disable(drvdata->clk);
        clk_disable(drvdata->pclk);
}

static int __flush_iotlb(struct iommu_domain *domain)
{
        struct msm_priv *priv = domain->priv;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
        /*
         * When the page tables are not L2-cacheable, they must be cleaned
         * out of the CPU caches so the IOMMU's table walker sees the
         * latest entries before the TLBs are invalidated.
         */
        unsigned long *fl_table = priv->pgtable;
        int i;

        if (!list_empty(&priv->list_attached)) {
                dmac_flush_range(fl_table, fl_table + SZ_16K);

                for (i = 0; i < NUM_FL_PTE; i++)
                        if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
                                void *sl_table = __va(fl_table[i] &
                                                      FL_BASE_MASK);
                                dmac_flush_range(sl_table, sl_table + SZ_4K);
                        }
        }
#endif

        list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
                if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
                        BUG();

                iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
                BUG_ON(!iommu_drvdata);

                ret = __enable_clocks(iommu_drvdata);
                if (ret)
                        goto fail;

                SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
                __disable_clocks(iommu_drvdata);
        }
fail:
        return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
        SET_PRRR(base, ctx, 0);
        SET_NMRR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
        unsigned int prrr, nmrr;
        __reset_context(base, ctx);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, 0);
        /* TTBR0 holds bits [31:14] of the 16 KiB-aligned table base */
        SET_TTBR0_PA(base, ctx, (pgtable >> 14));

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);

        /* Set TEX remap attributes */
        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);
        SET_PRRR(base, ctx, prrr);
        SET_NMRR(base, ctx, nmrr);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
        /* Configure page tables as inner-cacheable and shareable to reduce
         * the TLB miss penalty.
         */
        SET_TTBR0_SH(base, ctx, 1);
        SET_TTBR1_SH(base, ctx, 1);

        SET_TTBR0_NOS(base, ctx, 1);
        SET_TTBR1_NOS(base, ctx, 1);

        SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
        SET_TTBR0_IRGNL(base, ctx, 1);

        SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
        SET_TTBR1_IRGNL(base, ctx, 1);

        SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
        SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static int msm_iommu_domain_init(struct iommu_domain *domain)
{
        struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);
        priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
                                                          get_order(SZ_16K));

        if (!priv->pgtable)
                goto fail_nomem;

        memset(priv->pgtable, 0, SZ_16K);
        domain->priv = priv;

        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end = (1ULL << 32) - 1;
        domain->geometry.force_aperture = true;

        return 0;

fail_nomem:
        kfree(priv);
        return -ENOMEM;
}

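/*
 * Page-table layout: the 16 KiB first-level table allocated above follows
 * the ARM VMSA short-descriptor format, i.e. 4096 word-sized entries each
 * covering 1 MiB of the 32-bit input address space, pointing either at a
 * section/supersection mapping or at a 256-entry second-level table (the
 * driver allocates a full 4 KiB page per second-level table).
 */
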
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        int i;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = domain->priv;
        domain->priv = NULL;

        if (priv) {
                fl_table = priv->pgtable;

                for (i = 0; i < NUM_FL_PTE; i++)
                        if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
                                free_page((unsigned long) __va(((fl_table[i]) &
                                                                FL_BASE_MASK)));

                free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
                priv->pgtable = NULL;
        }

        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct msm_priv *priv;
        struct msm_iommu_ctx_dev *ctx_dev;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        struct msm_iommu_ctx_drvdata *tmp_drvdata;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = domain->priv;

        if (!priv || !dev) {
                ret = -EINVAL;
                goto fail;
        }

        iommu_drvdata = dev_get_drvdata(dev->parent);
        ctx_drvdata = dev_get_drvdata(dev);
        ctx_dev = dev->platform_data;

        if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
                ret = -EINVAL;
                goto fail;
        }

        if (!list_empty(&ctx_drvdata->attached_elm)) {
                ret = -EBUSY;
                goto fail;
        }

        list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
                if (tmp_drvdata == ctx_drvdata) {
                        ret = -EBUSY;
                        goto fail;
                }

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        __program_context(iommu_drvdata->base, ctx_dev->num,
                          __pa(priv->pgtable));

        __disable_clocks(iommu_drvdata);
        list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
        ret = __flush_iotlb(domain);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv;
        struct msm_iommu_ctx_dev *ctx_dev;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = domain->priv;

        if (!priv || !dev)
                goto fail;

        iommu_drvdata = dev_get_drvdata(dev->parent);
        ctx_drvdata = dev_get_drvdata(dev);
        ctx_dev = dev->platform_data;

        if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
                goto fail;

        ret = __flush_iotlb(domain);
        if (ret)
                goto fail;

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        __reset_context(iommu_drvdata->base, ctx_dev->num);
        __disable_clocks(iommu_drvdata);
        list_del_init(&ctx_drvdata->attached_elm);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
        int ret = 0, tex, sh;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
        tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

        if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
                ret = -EINVAL;
                goto fail;
        }

        priv = domain->priv;
        if (!priv) {
                ret = -EINVAL;
                goto fail;
        }

        fl_table = priv->pgtable;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad size: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        if (len == SZ_16M || len == SZ_1M) {
                pgprot = sh ? FL_SHARED : 0;
                pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? FL_TEX0 : 0;
        } else {
                pgprot = sh ? SL_SHARED : 0;
                pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? SL_TEX0 : 0;
        }

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        if (len == SZ_16M) {
                int i;

                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
                                  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
                                  FL_SHARED | FL_NG | pgprot;
        }

        if (len == SZ_1M)
                *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
                                            FL_TYPE_SECT | FL_SHARED | pgprot;

        /* Need a 2nd level table */
        if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
                unsigned long *sl;
                sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
                                                        get_order(SZ_4K));

                if (!sl) {
                        pr_debug("Could not allocate second level table\n");
                        ret = -ENOMEM;
                        goto fail;
                }

                memset(sl, 0, SZ_4K);
                *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_4K)
                *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
                                          SL_SHARED | SL_TYPE_SMALL | pgprot;

        if (len == SZ_64K) {
                int i;

                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
                            SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
        }

        ret = __flush_iotlb(domain);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}
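
/*
 * Note on the 16-iteration loops above: the ARM short-descriptor format
 * encodes a 16 MiB supersection as 16 identical consecutive first-level
 * entries, and a 64 KiB large page as 16 identical consecutive
 * second-level entries, so both must be written (and later cleared) as
 * a group.
 */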

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
                              size_t len)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        int i, ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = domain->priv;

        if (!priv)
                goto fail;

        fl_table = priv->pgtable;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad length: %zu\n", len);
                goto fail;
        }

        if (!fl_table) {
                pr_debug("Null page table\n");
                goto fail;
        }

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        if (*fl_pte == 0) {
                pr_debug("First level PTE is 0\n");
                goto fail;
        }

        /* Unmap supersection */
        if (len == SZ_16M)
                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = 0;

        if (len == SZ_1M)
                *fl_pte = 0;

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_64K) {
                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = 0;
        }

        if (len == SZ_4K)
                *sl_pte = 0;

        if (len == SZ_4K || len == SZ_64K) {
                int used = 0;

                /* Free the second-level table once its last entry is gone */
                for (i = 0; i < NUM_SL_PTE; i++)
                        if (sl_table[i])
                                used = 1;
                if (!used) {
                        free_page((unsigned long)sl_table);
                        *fl_pte = 0;
                }
        }

        ret = __flush_iotlb(domain);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        /* the IOMMU API requires us to return how many bytes were unmapped */
        len = ret ? 0 : len;
        return len;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          unsigned long va)
{
        struct msm_priv *priv;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        unsigned int par;
        unsigned long flags;
        void __iomem *base;
        phys_addr_t ret = 0;
        int ctx;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = domain->priv;
        if (list_empty(&priv->list_attached))
                goto fail;

        ctx_drvdata = list_entry(priv->list_attached.next,
                                 struct msm_iommu_ctx_drvdata, attached_elm);
        iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

        base = iommu_drvdata->base;
        ctx = ctx_drvdata->num;

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_V2PPR(base, ctx, va & V2Pxx_VA);

        par = GET_PAR(base, ctx);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(base, ctx))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(base, ctx))
                ret = 0;

        __disable_clocks(iommu_drvdata);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

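/*
 * iova_to_phys uses the hardware's V2P (virtual-to-physical) probe
 * operation rather than walking the page tables in software: the VA is
 * written to the V2PPR register and the translated address read back
 * from PAR, with a fault bit flagging an unmapped address.
 */
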
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
                                    unsigned long cap)
{
        return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);
        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
        pr_err("PRRR   = %08x    NMRR   = %08x\n",
               GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_drvdata *drvdata = dev_id;
        void __iomem *base;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!drvdata) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        base = drvdata->base;

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int) base);

        ret = __enable_clocks(drvdata);
        if (ret)
                goto fail;

        for (i = 0; i < drvdata->ncb; i++) {
                fsr = GET_FSR(base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(base, i);
                        /* Clear the recorded fault status */
                        SET_FSR(base, i, 0x4000000F);
                }
        }
        __disable_clocks(drvdata);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

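/*
 * The fault handler is deliberately non-static: it is declared in
 * mach/iommu.h and registered against the IOMMU's context interrupts by
 * the separate msm_iommu_dev probe code. Because CFCFG is set in
 * __program_context, a faulting transaction stalls until FSR is cleared
 * above.
 */
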
static struct iommu_ops msm_iommu_ops = {
        .domain_init = msm_iommu_domain_init,
        .domain_destroy = msm_iommu_domain_destroy,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .domain_has_cap = msm_iommu_domain_has_cap,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

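/*
 * Once registered via bus_set_iommu() below, these callbacks are reached
 * through the generic IOMMU API. A minimal usage sketch (illustrative
 * only; "dev" stands for some IOMMU context device on the platform bus,
 * and this driver interprets the prot bits as MSM_IOMMU_ATTR_* cache
 * attributes):
 *
 *      struct iommu_domain *domain;
 *
 *      domain = iommu_domain_alloc(&platform_bus_type);
 *      if (!domain)
 *              return -ENOMEM;
 *      if (iommu_attach_device(domain, dev))
 *              goto err;
 *      iommu_map(domain, iova, pa, SZ_4K, prot);
 */
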
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
        int i = 0;
        unsigned int prrr = 0;
        unsigned int nmrr = 0;
        int c_icp, c_ocp, c_mt, c_nos;

        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);

        for (i = 0; i < NUM_TEX_CLASS; i++) {
                c_nos = PRRR_NOS(prrr, i);
                c_mt = PRRR_MT(prrr, i);
                c_icp = NMRR_ICP(nmrr, i);
                c_ocp = NMRR_OCP(nmrr, i);

                if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
                        return i;
        }

        return -ENODEV;
}

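/*
 * get_tex_class() scans the CPU's current PRRR/NMRR configuration for a
 * TEX class whose memory type, shareability and inner/outer cache
 * policies match the requested combination, returning the class index
 * (or -ENODEV when no class matches). The table below caches one class
 * per MSM_IOMMU_ATTR_* policy for use at map time.
 */
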
static void __init setup_iommu_tex_classes(void)
{
        msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
                        get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
                        get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
                        get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
                        get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
        setup_iommu_tex_classes();
        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
        return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");