#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
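/*
 * Boot-time toggle for virtual merging: "novmerge" stops iommu_map_sg()
 * from coalescing adjacent scatterlist entries into a single DMA segment,
 * while "vmerge" allows it (merging is enabled by default).
 */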
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
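/*
 * Optional fault injection (CONFIG_FAIL_IOMMU): lets DMA mapping requests
 * be failed artificially so that driver error paths can be exercised.
 * Failures are configured through the fail_iommu= boot parameter and the
 * debugfs attributes created below, and can be restricted to individual
 * devices via a per-device sysfs file.
 */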
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}

__setup("fail_iommu=", setup_fail_iommu);
static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
static int __init fail_iommu_setup(void)
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
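/*
 * iommu_range_alloc() hands out a run of 'npages' free entries from the
 * TCE table bitmap.  Requests larger than 15 pages come from the table's
 * dedicated large pool; everything else is spread across the per-CPU-hashed
 * pools to reduce lock contention.  If the chosen pool is exhausted the
 * search retries in the other pools before failing.
 */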
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
	int largealloc = npages > 15;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);
	if (printk_ratelimit())
		WARN_ON(1);
	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;
	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
			pool->hint = pool->start;
		} else if (pass <= tbl->nr_pools) {
			/* try the next pool before giving up */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;
		} else {
			/* give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);

	spin_unlock_irqrestore(&(pool->lock), flags);
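/*
 * iommu_alloc() reserves a range with iommu_range_alloc() and then asks the
 * platform (ppc_md.tce_build) to install the translations.  If building the
 * TCEs fails, the just-allocated range is handed back via __iommu_free().
 */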
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	entry += tbl->it_offset;
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);
		__iommu_free(tbl, ret, npages);
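/*
 * iommu_free_check() validates a free request: the DMA address must decode
 * to entries that lie entirely inside this table, otherwise the free is
 * refused and a (rate-limited) warning is printed.
 */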
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
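/*
 * get_pool() maps a table entry back to the pool that owns it: entries at or
 * beyond large_pool.start belong to the large pool, everything else to one
 * of the fixed-size pools indexed by entry / poolsize.
 */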
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool sits at the end of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}
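/*
 * __iommu_free() undoes a mapping: after iommu_free_check() approves the
 * range, the platform's tce_free hook tears down the translations and the
 * corresponding bits are cleared in it_map under the owning pool's lock.
 */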
	unsigned long entry, free_entry;
	struct iommu_pool *pool;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;
	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);
	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
	__iommu_free(tbl, dma_addr, npages);
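/*
 * iommu_map_sg() walks a scatterlist, allocating IOMMU space for each
 * element.  When an element's DMA address directly follows the previous one
 * (and novmerge is not set, and the merged length stays within the device's
 * max segment size), the two are merged into a single DMA segment.
 */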
	int outcount, incount, i, build_fail = 0;
	unsigned int max_seg_size;

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);
		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);
			build_fail = ppc_md.tce_build(tbl, entry, npages,
						      vaddr & IOMMU_PAGE_MASK,
						      direction, attrs);
		DBG(" - trying merge...\n");
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				DBG(" can't merge, new segment.\n");
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			DBG(" - filling new segment.\n");
			outs->dma_length = slen;
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);

	DBG("mapped %d elements:\n", outcount);
	if (outcount < incount) {
		outs->dma_length = 0;
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
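/*
 * iommu_table_clear(): on a normal boot the whole table is wiped via the
 * platform's tce_free hook.  On a kdump kernel the previous kernel's
 * mappings may still be in use, so instead the entries that are already
 * populated are marked busy in it_map; if that would leave fewer than
 * KDUMP_MIN_TCE_ENTRIES free, the top of the table is reclaimed.
 */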
static void iommu_table_clear(struct iommu_table *tbl)
#ifdef CONFIG_CRASH_DUMP
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
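/*
 * iommu_init_table() allocates the allocation bitmap (panicking if that
 * fails), optionally reserves entry 0, and carves the table into pools: up
 * to IOMMU_NR_POOLS of them for tables covering 1GB or more of DMA space,
 * with the top quarter of the table set aside as the large pool.
 */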
	static int welcomed = 0;
	struct iommu_pool *p;

	panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;
	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}
	unsigned long bitmap_sz, i;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n",
		       __func__, node_name);
		return;
	}

	/* verify that table contains no entries */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;
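/*
 * iommu_map_page() is the dma_map_page-style entry point: it works out how
 * many IOMMU pages the buffer spans and, for naturally aligned buffers of at
 * least PAGE_SIZE, requests a matching alignment before calling iommu_alloc().
 */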
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	unsigned int npages, align;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
	if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
	    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
				 mask >> IOMMU_PAGE_SHIFT, align,
				 attrs);
	if (dma_handle == DMA_ERROR_CODE) {
		if (printk_ratelimit()) {
			dev_info(dev, "iommu_alloc failed, tbl %p "
				 "vaddr %p npages %d\n", tbl, vaddr,
				 npages);
		}
	} else
		dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
	iommu_free(tbl, dma_handle, npages);
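/*
 * The coherent-allocation path: a physically contiguous block is obtained
 * with alloc_pages_node() and mapped bidirectionally through the table.
 * Requests whose order reaches IOMAP_MAX_ORDER are refused up front.
 */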
	unsigned int nio_pages, io_order;

	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	page = alloc_pages_node(node, flag, order);

	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	unsigned int nio_pages;

	iommu_free(tbl, dma_handle, nio_pages);