Linux Kernel 3.7.1
iommu.c
1 /* iommu.c: Generic sparc64 IOMMU support.
2  *
3  * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
4  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/slab.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/errno.h>
14 #include <linux/iommu-helper.h>
15 #include <linux/bitmap.h>
16 
17 #ifdef CONFIG_PCI
18 #include <linux/pci.h>
19 #endif
20 
21 #include <asm/iommu.h>
22 
23 #include "iommu_common.h"
24 
25 #define STC_CTXMATCH_ADDR(STC, CTX) \
26  ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
27 #define STC_FLUSHFLAG_INIT(STC) \
28  (*((STC)->strbuf_flushflag) = 0UL)
29 #define STC_FLUSHFLAG_SET(STC) \
30  (*((STC)->strbuf_flushflag) != 0UL)
31 
32 #define iommu_read(__reg) \
33 ({ u64 __ret; \
34  __asm__ __volatile__("ldxa [%1] %2, %0" \
35  : "=r" (__ret) \
36  : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
37  : "memory"); \
38  __ret; \
39 })
40 #define iommu_write(__reg, __val) \
41  __asm__ __volatile__("stxa %0, [%1] %2" \
42  : /* no outputs */ \
43  : "r" (__val), "r" (__reg), \
44  "i" (ASI_PHYS_BYPASS_EC_E))
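
/* Illustrative sketch, not part of the original file: iommu_read() and
 * iommu_write() issue loads/stores through the ASI_PHYS_BYPASS_EC_E address
 * space identifier, i.e. uncached accesses to physical addresses, so the
 * __reg argument is the physical address of an IOMMU control register rather
 * than an ioremap()ed pointer.  Typical use, as in iommu_flushall() below:
 *
 *	iommu_write(iommu->iommu_flushinv, ~(u64)0);
 *	(void) iommu_read(iommu->write_complete_reg);
 */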
45 
46 /* Must be invoked under the IOMMU lock. */
47 static void iommu_flushall(struct iommu *iommu)
48 {
49  if (iommu->iommu_flushinv) {
50  iommu_write(iommu->iommu_flushinv, ~(u64)0);
51  } else {
52  unsigned long tag;
53  int entry;
54 
55  tag = iommu->iommu_tags;
56  for (entry = 0; entry < 16; entry++) {
57  iommu_write(tag, 0);
58  tag += 8;
59  }
60 
61  /* Ensure completion of previous PIO writes. */
62  (void) iommu_read(iommu->write_complete_reg);
63  }
64 }
65 
66 #define IOPTE_CONSISTENT(CTX) \
67  (IOPTE_VALID | IOPTE_CACHE | \
68  (((CTX) << 47) & IOPTE_CONTEXT))
69 
70 #define IOPTE_STREAMING(CTX) \
71  (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
72 
73 /* Existing mappings are never marked invalid, instead they
74  * are pointed to a dummy page.
75  */
76 #define IOPTE_IS_DUMMY(iommu, iopte) \
77  ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
78 
79 static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
80 {
81  unsigned long val = iopte_val(*iopte);
82 
83  val &= ~IOPTE_PAGE;
84  val |= iommu->dummy_page_pa;
85 
86  iopte_val(*iopte) = val;
87 }
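
/* Illustrative sketch, not part of the original file: an unused IOPTE stays
 * valid but points at the shared dummy page, so a stray device access hits
 * harmless memory instead of generating an IOMMU fault.  A live streaming
 * entry for a (hypothetical) page-aligned physical address "paddr" and
 * context "ctx" is built the same way the mapping routines below do it:
 *
 *	iopte_val(*iopte) = IOPTE_STREAMING(ctx) | IOPTE_WRITE |
 *			    (paddr & IOPTE_PAGE);
 *
 * iopte_make_dummy() later rewrites only the IOPTE_PAGE bits back to
 * dummy_page_pa and leaves the valid/cacheable bits untouched.
 */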
88 
89 /* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
90  * facility it must all be done in one pass while under the iommu lock.
91  *
92  * On sun4u platforms, we only flush the IOMMU once every time we've passed
93  * over the entire page table doing allocations. Therefore we only ever advance
94  * the hint and cannot backtrack it.
95  */
96 unsigned long iommu_range_alloc(struct device *dev,
97  struct iommu *iommu,
98  unsigned long npages,
99  unsigned long *handle)
100 {
101  unsigned long n, end, start, limit, boundary_size;
102  struct iommu_arena *arena = &iommu->arena;
103  int pass = 0;
104 
105  /* This allocator was derived from x86_64's bit string search */
106 
107  /* Sanity check */
108  if (unlikely(npages == 0)) {
109  if (printk_ratelimit())
110  WARN_ON(1);
111  return DMA_ERROR_CODE;
112  }
113 
114  if (handle && *handle)
115  start = *handle;
116  else
117  start = arena->hint;
118 
119  limit = arena->limit;
120 
121  /* The case below can happen if we have a small segment appended
122  * to a large, or when the previous alloc was at the very end of
123  * the available space. If so, go back to the beginning and flush.
124  */
125  if (start >= limit) {
126  start = 0;
127  if (iommu->flush_all)
128  iommu->flush_all(iommu);
129  }
130 
131  again:
132 
133  if (dev)
134  boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
135  1 << IO_PAGE_SHIFT);
136  else
137  boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
138 
139  n = iommu_area_alloc(arena->map, limit, start, npages,
140  iommu->page_table_map_base >> IO_PAGE_SHIFT,
141  boundary_size >> IO_PAGE_SHIFT, 0);
142  if (n == -1) {
143  if (likely(pass < 1)) {
144  /* First failure, rescan from the beginning. */
145  start = 0;
146  if (iommu->flush_all)
147  iommu->flush_all(iommu);
148  pass++;
149  goto again;
150  } else {
151  /* Second failure, give up */
152  return DMA_ERROR_CODE;
153  }
154  }
155 
156  end = n + npages;
157 
158  arena->hint = end;
159 
160  /* Update handle for SG allocations */
161  if (handle)
162  *handle = end;
163 
164  return n;
165 }
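
/* Illustrative sketch, not part of the original file: the 'handle' facility
 * mentioned above is used when several allocations should be placed close
 * together, as dma_4u_map_sg() does later in this file.  All calls sharing
 * one handle happen under a single hold of iommu->lock (error handling
 * omitted; npages0/npages1 are hypothetical):
 *
 *	unsigned long flags, handle = 0, entry;
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	entry = iommu_range_alloc(dev, iommu, npages0, &handle);
 *	entry = iommu_range_alloc(dev, iommu, npages1, &handle);
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 */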
166 
167 void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168 {
169  struct iommu_arena *arena = &iommu->arena;
170  unsigned long entry;
171 
172  entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173 
174  bitmap_clear(arena->map, entry, npages);
175 }
176 
177 int iommu_table_init(struct iommu *iommu, int tsbsize,
178  u32 dma_offset, u32 dma_addr_mask,
179  int numa_node)
180 {
181  unsigned long i, order, sz, num_tsb_entries;
182  struct page *page;
183 
184  num_tsb_entries = tsbsize / sizeof(iopte_t);
185 
186  /* Setup initial software IOMMU state. */
187  spin_lock_init(&iommu->lock);
188  iommu->ctx_lowest_free = 1;
189  iommu->page_table_map_base = dma_offset;
190  iommu->dma_addr_mask = dma_addr_mask;
191 
192  /* Allocate and initialize the free area map. */
193  sz = num_tsb_entries / 8;
194  sz = (sz + 7UL) & ~7UL;
195  iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
196  if (!iommu->arena.map) {
197  printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
198  return -ENOMEM;
199  }
200  memset(iommu->arena.map, 0, sz);
201  iommu->arena.limit = num_tsb_entries;
202 
203  if (tlb_type != hypervisor)
204  iommu->flush_all = iommu_flushall;
205 
206  /* Allocate and initialize the dummy page which we
207  * set inactive IO PTEs to point to.
208  */
209  page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
210  if (!page) {
211  printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
212  goto out_free_map;
213  }
214  iommu->dummy_page = (unsigned long) page_address(page);
215  memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
216  iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
217 
218  /* Now allocate and setup the IOMMU page table itself. */
219  order = get_order(tsbsize);
220  page = alloc_pages_node(numa_node, GFP_KERNEL, order);
221  if (!page) {
222  printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
223  goto out_free_dummy_page;
224  }
225  iommu->page_table = (iopte_t *)page_address(page);
226 
227  for (i = 0; i < num_tsb_entries; i++)
228  iopte_make_dummy(iommu, &iommu->page_table[i]);
229 
230  return 0;
231 
232 out_free_dummy_page:
233  free_page(iommu->dummy_page);
234  iommu->dummy_page = 0UL;
235 
236 out_free_map:
237  kfree(iommu->arena.map);
238  iommu->arena.map = NULL;
239 
240  return -ENOMEM;
241 }
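
/* Illustrative sketch, not part of the original file: a bus controller's
 * probe code calls iommu_table_init() with its chosen TSB size and DVMA
 * window; the numbers below are made up, not taken from any real
 * controller.  A 128 KB TSB holds 16K IOPTEs, i.e. a 128 MB DVMA window
 * with 8 KB IO pages:
 *
 *	err = iommu_table_init(iommu, 128 * 1024,
 *			       0xc0000000, 0xffffffff, numa_node);
 *	if (err)
 *		return err;
 */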
242 
243 static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
244  unsigned long npages)
245 {
246  unsigned long entry;
247 
248  entry = iommu_range_alloc(dev, iommu, npages, NULL);
249  if (unlikely(entry == DMA_ERROR_CODE))
250  return NULL;
251 
252  return iommu->page_table + entry;
253 }
254 
255 static int iommu_alloc_ctx(struct iommu *iommu)
256 {
257  int lowest = iommu->ctx_lowest_free;
258  int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259 
260  if (unlikely(n == IOMMU_NUM_CTXS)) {
261  n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
262  if (unlikely(n == lowest)) {
263  printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
264  n = 0;
265  }
266  }
267  if (n)
268  __set_bit(n, iommu->ctx_bitmap);
269 
270  return n;
271 }
272 
273 static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
274 {
275  if (likely(ctx)) {
276  __clear_bit(ctx, iommu->ctx_bitmap);
277  if (ctx < iommu->ctx_lowest_free)
278  iommu->ctx_lowest_free = ctx;
279  }
280 }
281 
282 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
283  dma_addr_t *dma_addrp, gfp_t gfp,
284  struct dma_attrs *attrs)
285 {
286  unsigned long flags, order, first_page;
287  struct iommu *iommu;
288  struct page *page;
289  int npages, nid;
290  iopte_t *iopte;
291  void *ret;
292 
293  size = IO_PAGE_ALIGN(size);
294  order = get_order(size);
295  if (order >= 10)
296  return NULL;
297 
298  nid = dev->archdata.numa_node;
299  page = alloc_pages_node(nid, gfp, order);
300  if (unlikely(!page))
301  return NULL;
302 
303  first_page = (unsigned long) page_address(page);
304  memset((char *)first_page, 0, PAGE_SIZE << order);
305 
306  iommu = dev->archdata.iommu;
307 
308  spin_lock_irqsave(&iommu->lock, flags);
309  iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
310  spin_unlock_irqrestore(&iommu->lock, flags);
311 
312  if (unlikely(iopte == NULL)) {
313  free_pages(first_page, order);
314  return NULL;
315  }
316 
317  *dma_addrp = (iommu->page_table_map_base +
318  ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
319  ret = (void *) first_page;
320  npages = size >> IO_PAGE_SHIFT;
321  first_page = __pa(first_page);
322  while (npages--) {
323  iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
324  IOPTE_WRITE |
325  (first_page & IOPTE_PAGE));
326  iopte++;
327  first_page += IO_PAGE_SIZE;
328  }
329 
330  return ret;
331 }
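
/* Illustrative sketch, not part of the original file: drivers never call
 * dma_4u_alloc_coherent() directly; they go through the generic DMA API,
 * which dispatches via the dma_ops table registered at the bottom of this
 * file.  "mydev" and the 8 KB size are hypothetical:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(mydev, 8192, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(mydev, 8192, cpu, dma);
 */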
332 
333 static void dma_4u_free_coherent(struct device *dev, size_t size,
334  void *cpu, dma_addr_t dvma,
335  struct dma_attrs *attrs)
336 {
337  struct iommu *iommu;
338  unsigned long flags, order, npages;
339 
340  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
341  iommu = dev->archdata.iommu;
342 
343  spin_lock_irqsave(&iommu->lock, flags);
344 
345  iommu_range_free(iommu, dvma, npages);
346 
347  spin_unlock_irqrestore(&iommu->lock, flags);
348 
349  order = get_order(size);
350  if (order < 10)
351  free_pages((unsigned long)cpu, order);
352 }
353 
354 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355  unsigned long offset, size_t sz,
356  enum dma_data_direction direction,
357  struct dma_attrs *attrs)
358 {
359  struct iommu *iommu;
360  struct strbuf *strbuf;
361  iopte_t *base;
362  unsigned long flags, npages, oaddr;
363  unsigned long i, base_paddr, ctx;
364  u32 bus_addr, ret;
365  unsigned long iopte_protection;
366 
367  iommu = dev->archdata.iommu;
368  strbuf = dev->archdata.stc;
369 
370  if (unlikely(direction == DMA_NONE))
371  goto bad_no_ctx;
372 
373  oaddr = (unsigned long)(page_address(page) + offset);
374  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
375  npages >>= IO_PAGE_SHIFT;
376 
377  spin_lock_irqsave(&iommu->lock, flags);
378  base = alloc_npages(dev, iommu, npages);
379  ctx = 0;
380  if (iommu->iommu_ctxflush)
381  ctx = iommu_alloc_ctx(iommu);
382  spin_unlock_irqrestore(&iommu->lock, flags);
383 
384  if (unlikely(!base))
385  goto bad;
386 
387  bus_addr = (iommu->page_table_map_base +
388  ((base - iommu->page_table) << IO_PAGE_SHIFT));
389  ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
390  base_paddr = __pa(oaddr & IO_PAGE_MASK);
391  if (strbuf->strbuf_enabled)
392  iopte_protection = IOPTE_STREAMING(ctx);
393  else
394  iopte_protection = IOPTE_CONSISTENT(ctx);
395  if (direction != DMA_TO_DEVICE)
396  iopte_protection |= IOPTE_WRITE;
397 
398  for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
399  iopte_val(*base) = iopte_protection | base_paddr;
400 
401  return ret;
402 
403 bad:
404  iommu_free_ctx(iommu, ctx);
405 bad_no_ctx:
406  if (printk_ratelimit())
407  WARN_ON(1);
408  return DMA_ERROR_CODE;
409 }
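
/* Illustrative sketch, not part of the original file: the streaming-mapping
 * path above is reached from dma_map_page(); callers must check for a
 * mapping error before using the address.  "mydev", "pg" and "len" are
 * hypothetical:
 *
 *	dma_addr_t busa = dma_map_page(mydev, pg, 0, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(mydev, busa))
 *		return -ENOMEM;
 *	... device DMAs into the buffer ...
 *	dma_unmap_page(mydev, busa, len, DMA_FROM_DEVICE);
 */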
410 
411 static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
412  u32 vaddr, unsigned long ctx, unsigned long npages,
413  enum dma_data_direction direction)
414 {
415  int limit;
416 
417  if (strbuf->strbuf_ctxflush &&
418  iommu->iommu_ctxflush) {
419  unsigned long matchreg, flushreg;
420  u64 val;
421 
422  flushreg = strbuf->strbuf_ctxflush;
423  matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
424 
425  iommu_write(flushreg, ctx);
426  val = iommu_read(matchreg);
427  val &= 0xffff;
428  if (!val)
429  goto do_flush_sync;
430 
431  while (val) {
432  if (val & 0x1)
433  iommu_write(flushreg, ctx);
434  val >>= 1;
435  }
436  val = iommu_read(matchreg);
437  if (unlikely(val)) {
438  printk(KERN_WARNING "strbuf_flush: ctx flush "
439  "timeout matchreg[%llx] ctx[%lx]\n",
440  val, ctx);
441  goto do_page_flush;
442  }
443  } else {
444  unsigned long i;
445 
446  do_page_flush:
447  for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
448  iommu_write(strbuf->strbuf_pflush, vaddr);
449  }
450 
451 do_flush_sync:
452  /* If the device could not have possibly put dirty data into
453  * the streaming cache, no flush-flag synchronization needs
454  * to be performed.
455  */
456  if (direction == DMA_TO_DEVICE)
457  return;
458 
459  STC_FLUSHFLAG_INIT(strbuf);
460  iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
461  (void) iommu_read(iommu->write_complete_reg);
462 
463  limit = 100000;
464  while (!STC_FLUSHFLAG_SET(strbuf)) {
465  limit--;
466  if (!limit)
467  break;
468  udelay(1);
469  rmb();
470  }
471  if (!limit)
472  printk(KERN_WARNING "strbuf_flush: flushflag timeout "
473  "vaddr[%08x] ctx[%lx] npages[%ld]\n",
474  vaddr, ctx, npages);
475 }
476 
477 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
478  size_t sz, enum dma_data_direction direction,
479  struct dma_attrs *attrs)
480 {
481  struct iommu *iommu;
482  struct strbuf *strbuf;
483  iopte_t *base;
484  unsigned long flags, npages, ctx, i;
485 
486  if (unlikely(direction == DMA_NONE)) {
487  if (printk_ratelimit())
488  WARN_ON(1);
489  return;
490  }
491 
492  iommu = dev->archdata.iommu;
493  strbuf = dev->archdata.stc;
494 
495  npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
496  npages >>= IO_PAGE_SHIFT;
497  base = iommu->page_table +
498  ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
499  bus_addr &= IO_PAGE_MASK;
500 
501  spin_lock_irqsave(&iommu->lock, flags);
502 
503  /* Record the context, if any. */
504  ctx = 0;
505  if (iommu->iommu_ctxflush)
506  ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
507 
508  /* Step 1: Kick data out of streaming buffers if necessary. */
509  if (strbuf->strbuf_enabled)
510  strbuf_flush(strbuf, iommu, bus_addr, ctx,
511  npages, direction);
512 
513  /* Step 2: Clear out TSB entries. */
514  for (i = 0; i < npages; i++)
515  iopte_make_dummy(iommu, base + i);
516 
517  iommu_range_free(iommu, bus_addr, npages);
518 
519  iommu_free_ctx(iommu, ctx);
520 
521  spin_unlock_irqrestore(&iommu->lock, flags);
522 }
523 
524 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
525  int nelems, enum dma_data_direction direction,
526  struct dma_attrs *attrs)
527 {
528  struct scatterlist *s, *outs, *segstart;
529  unsigned long flags, handle, prot, ctx;
530  dma_addr_t dma_next = 0, dma_addr;
531  unsigned int max_seg_size;
532  unsigned long seg_boundary_size;
533  int outcount, incount, i;
534  struct strbuf *strbuf;
535  struct iommu *iommu;
536  unsigned long base_shift;
537 
538  BUG_ON(direction == DMA_NONE);
539 
540  iommu = dev->archdata.iommu;
541  strbuf = dev->archdata.stc;
542  if (nelems == 0 || !iommu)
543  return 0;
544 
545  spin_lock_irqsave(&iommu->lock, flags);
546 
547  ctx = 0;
548  if (iommu->iommu_ctxflush)
549  ctx = iommu_alloc_ctx(iommu);
550 
551  if (strbuf->strbuf_enabled)
552  prot = IOPTE_STREAMING(ctx);
553  else
554  prot = IOPTE_CONSISTENT(ctx);
555  if (direction != DMA_TO_DEVICE)
556  prot |= IOPTE_WRITE;
557 
558  outs = s = segstart = &sglist[0];
559  outcount = 1;
560  incount = nelems;
561  handle = 0;
562 
563  /* Init first segment length for backout at failure */
564  outs->dma_length = 0;
565 
566  max_seg_size = dma_get_max_seg_size(dev);
567  seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
568  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
569  base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
570  for_each_sg(sglist, s, nelems, i) {
571  unsigned long paddr, npages, entry, out_entry = 0, slen;
572  iopte_t *base;
573 
574  slen = s->length;
575  /* Sanity check */
576  if (slen == 0) {
577  dma_next = 0;
578  continue;
579  }
580  /* Allocate iommu entries for that segment */
581  paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
582  npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
583  entry = iommu_range_alloc(dev, iommu, npages, &handle);
584 
585  /* Handle failure */
586  if (unlikely(entry == DMA_ERROR_CODE)) {
587  if (printk_ratelimit())
588  printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
589  " npages %lx\n", iommu, paddr, npages);
590  goto iommu_map_failed;
591  }
592 
593  base = iommu->page_table + entry;
594 
595  /* Convert entry to a dma_addr_t */
596  dma_addr = iommu->page_table_map_base +
597  (entry << IO_PAGE_SHIFT);
598  dma_addr |= (s->offset & ~IO_PAGE_MASK);
599 
600  /* Insert into HW table */
601  paddr &= IO_PAGE_MASK;
602  while (npages--) {
603  iopte_val(*base) = prot | paddr;
604  base++;
605  paddr += IO_PAGE_SIZE;
606  }
607 
608  /* If we are in an open segment, try merging */
609  if (segstart != s) {
610  /* We cannot merge if:
611  * - allocated dma_addr isn't contiguous to previous allocation
612  */
613  if ((dma_addr != dma_next) ||
614  (outs->dma_length + s->length > max_seg_size) ||
615  (is_span_boundary(out_entry, base_shift,
616  seg_boundary_size, outs, s))) {
617  /* Can't merge: create a new segment */
618  segstart = s;
619  outcount++;
620  outs = sg_next(outs);
621  } else {
622  outs->dma_length += s->length;
623  }
624  }
625 
626  if (segstart == s) {
627  /* This is a new segment, fill entries */
628  outs->dma_address = dma_addr;
629  outs->dma_length = slen;
630  out_entry = entry;
631  }
632 
633  /* Calculate next page pointer for contiguous check */
634  dma_next = dma_addr + slen;
635  }
636 
637  spin_unlock_irqrestore(&iommu->lock, flags);
638 
639  if (outcount < incount) {
640  outs = sg_next(outs);
641  outs->dma_address = DMA_ERROR_CODE;
642  outs->dma_length = 0;
643  }
644 
645  return outcount;
646 
647 iommu_map_failed:
648  for_each_sg(sglist, s, nelems, i) {
649  if (s->dma_length != 0) {
650  unsigned long vaddr, npages, entry, j;
651  iopte_t *base;
652 
653  vaddr = s->dma_address & IO_PAGE_MASK;
654  npages = iommu_num_pages(s->dma_address, s->dma_length,
655  IO_PAGE_SIZE);
656  iommu_range_free(iommu, vaddr, npages);
657 
658  entry = (vaddr - iommu->page_table_map_base)
659  >> IO_PAGE_SHIFT;
660  base = iommu->page_table + entry;
661 
662  for (j = 0; j < npages; j++)
663  iopte_make_dummy(iommu, base + j);
664 
665  s->dma_address = DMA_ERROR_CODE;
666  s->dma_length = 0;
667  }
668  if (s == outs)
669  break;
670  }
671  spin_unlock_irqrestore(&iommu->lock, flags);
672 
673  return 0;
674 }
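
/* Illustrative sketch, not part of the original file: callers hand a
 * scatterlist to dma_map_sg() and must iterate over the (possibly smaller)
 * segment count it returns, since adjacent entries may have been merged
 * above.  "mydev", "sgl", "nents" and program_device() are hypothetical:
 *
 *	struct scatterlist *sg;
 *	int i, mapped = dma_map_sg(mydev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_device(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(mydev, sgl, nents, DMA_TO_DEVICE);
 */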
675 
676 /* If contexts are being used, they are the same in all of the mappings
677  * we make for a particular SG.
678  */
679 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
680 {
681  unsigned long ctx = 0;
682 
683  if (iommu->iommu_ctxflush) {
684  iopte_t *base;
685  u32 bus_addr;
686 
687  bus_addr = sg->dma_address & IO_PAGE_MASK;
688  base = iommu->page_table +
689  ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
690 
691  ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
692  }
693  return ctx;
694 }
695 
696 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
697  int nelems, enum dma_data_direction direction,
698  struct dma_attrs *attrs)
699 {
700  unsigned long flags, ctx;
701  struct scatterlist *sg;
702  struct strbuf *strbuf;
703  struct iommu *iommu;
704 
705  BUG_ON(direction == DMA_NONE);
706 
707  iommu = dev->archdata.iommu;
708  strbuf = dev->archdata.stc;
709 
710  ctx = fetch_sg_ctx(iommu, sglist);
711 
712  spin_lock_irqsave(&iommu->lock, flags);
713 
714  sg = sglist;
715  while (nelems--) {
716  dma_addr_t dma_handle = sg->dma_address;
717  unsigned int len = sg->dma_length;
718  unsigned long npages, entry;
719  iopte_t *base;
720  int i;
721 
722  if (!len)
723  break;
724  npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
725  iommu_range_free(iommu, dma_handle, npages);
726 
727  entry = ((dma_handle - iommu->page_table_map_base)
728  >> IO_PAGE_SHIFT);
729  base = iommu->page_table + entry;
730 
731  dma_handle &= IO_PAGE_MASK;
732  if (strbuf->strbuf_enabled)
733  strbuf_flush(strbuf, iommu, dma_handle, ctx,
734  npages, direction);
735 
736  for (i = 0; i < npages; i++)
737  iopte_make_dummy(iommu, base + i);
738 
739  sg = sg_next(sg);
740  }
741 
742  iommu_free_ctx(iommu, ctx);
743 
744  spin_unlock_irqrestore(&iommu->lock, flags);
745 }
746 
747 static void dma_4u_sync_single_for_cpu(struct device *dev,
748  dma_addr_t bus_addr, size_t sz,
749  enum dma_data_direction direction)
750 {
751  struct iommu *iommu;
752  struct strbuf *strbuf;
753  unsigned long flags, ctx, npages;
754 
755  iommu = dev->archdata.iommu;
756  strbuf = dev->archdata.stc;
757 
758  if (!strbuf->strbuf_enabled)
759  return;
760 
761  spin_lock_irqsave(&iommu->lock, flags);
762 
763  npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
764  npages >>= IO_PAGE_SHIFT;
765  bus_addr &= IO_PAGE_MASK;
766 
767  /* Step 1: Record the context, if any. */
768  ctx = 0;
769  if (iommu->iommu_ctxflush &&
770  strbuf->strbuf_ctxflush) {
771  iopte_t *iopte;
772 
773  iopte = iommu->page_table +
774  ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
775  ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
776  }
777 
778  /* Step 2: Kick data out of streaming buffers. */
779  strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
780 
781  spin_unlock_irqrestore(&iommu->lock, flags);
782 }
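
/* Illustrative sketch, not part of the original file: a driver that wants
 * the CPU to read a still-mapped streaming buffer asks for a sync first,
 * which reaches the strbuf_flush() above.  "mydev", "busa" and "len" are
 * hypothetical:
 *
 *	dma_sync_single_for_cpu(mydev, busa, len, DMA_FROM_DEVICE);
 *	... CPU may now read what the device wrote ...
 */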
783 
784 static void dma_4u_sync_sg_for_cpu(struct device *dev,
785  struct scatterlist *sglist, int nelems,
786  enum dma_data_direction direction)
787 {
788  struct iommu *iommu;
789  struct strbuf *strbuf;
790  unsigned long flags, ctx, npages, i;
791  struct scatterlist *sg, *sgprv;
792  u32 bus_addr;
793 
794  iommu = dev->archdata.iommu;
795  strbuf = dev->archdata.stc;
796 
797  if (!strbuf->strbuf_enabled)
798  return;
799 
800  spin_lock_irqsave(&iommu->lock, flags);
801 
802  /* Step 1: Record the context, if any. */
803  ctx = 0;
804  if (iommu->iommu_ctxflush &&
805  strbuf->strbuf_ctxflush) {
806  iopte_t *iopte;
807 
808  iopte = iommu->page_table +
809  ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
810  ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
811  }
812 
813  /* Step 2: Kick data out of streaming buffers. */
814  bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
815  sgprv = NULL;
816  for_each_sg(sglist, sg, nelems, i) {
817  if (sg->dma_length == 0)
818  break;
819  sgprv = sg;
820  }
821 
822  npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
823  - bus_addr) >> IO_PAGE_SHIFT;
824  strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
825 
826  spin_unlock_irqrestore(&iommu->lock, flags);
827 }
828 
829 static struct dma_map_ops sun4u_dma_ops = {
830  .alloc = dma_4u_alloc_coherent,
831  .free = dma_4u_free_coherent,
832  .map_page = dma_4u_map_page,
833  .unmap_page = dma_4u_unmap_page,
834  .map_sg = dma_4u_map_sg,
835  .unmap_sg = dma_4u_unmap_sg,
836  .sync_single_for_cpu = dma_4u_sync_single_for_cpu,
837  .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
838 };
839 
840 struct dma_map_ops *dma_ops = &sun4u_dma_ops;
841 EXPORT_SYMBOL(dma_ops);
842 
843 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844 
845 int dma_supported(struct device *dev, u64 device_mask)
846 {
847  struct iommu *iommu = dev->archdata.iommu;
848  u64 dma_addr_mask = iommu->dma_addr_mask;
849 
850  if (device_mask >= (1UL << 32UL))
851  return 0;
852 
853  if ((device_mask & dma_addr_mask) == dma_addr_mask)
854  return 1;
855 
856 #ifdef CONFIG_PCI
857  if (dev->bus == &pci_bus_type)
858  return pci64_dma_supported(to_pci_dev(dev), device_mask);
859 #endif
860 
861  return 0;
862 }
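
/* Illustrative sketch, not part of the original file: dma_supported() is
 * what a driver's dma_set_mask() call ultimately consults on this
 * platform.  A PCI device limited to 32-bit addressing would negotiate
 * its mask like this ("pdev" is hypothetical):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */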