#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>

#include <asm/delay.h>

#undef PDIR_SEARCH_TIMING
#define ALLOW_IOV_BYPASS
#undef ALLOW_IOV_BYPASS_SG
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE      __inline__

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif
#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif
#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)        printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif
#ifdef ALLOW_IOV_BYPASS
#define DBG_BYPASS(x...)        printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
                panic(#expr); \
        }
#define DELAYED_RESOURCE_CNT    64

#define PCI_DEVICE_ID_HP_SX2000_IOC     0x12ec

#define ZX1_IOC_ID      ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID      ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID      ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID   ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID   ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET  0x1000

#define IOC_FUNC_ID     0x000
#define IOC_FCLASS      0x008
#define IOC_IBASE       0x300
#define IOC_IMASK       0x308
#define IOC_PCOM        0x310
#define IOC_TCNFG       0x318
#define IOC_PDIR_BASE   0x320

#define IOC_ROPE0_CFG   0x500
#define IOC_ROPE_AO     0x10

#define ZX1_SBA_IOMMU_COOKIE    0x0000badbadc0ffeeUL

static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

#if DELAYED_RESOURCE_CNT > 0

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE       0x100
        unsigned long avg_search[SBA_SEARCH_SAMPLE];
        unsigned long avg_idx;

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

#define sba_sg_address(sg)      sg_virt((sg))
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)   (((dev)->bus == &pci_bus_type)                                          \
                         ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)   NULL
#endif
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y)    ((x + ((y)-1)) & ~((y)-1))
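/*
 * Worked example (illustration only, not part of the original source): with
 * 16KB IOVA pages, ROUNDUP(0x5000, 0x4000) == 0x8000, i.e. a 0x5000-byte
 * request still consumes two full IOVA pages.  The macro assumes 'y' is a
 * power of two.
 */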
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
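/*
 * Usage sketch (illustrative, assuming the ioc fields used elsewhere in this
 * file): IOC registers are 64-bit quantities at fixed offsets from the MMIO
 * base, accessed with plain raw 64-bit loads and stores, e.g.
 *
 *      func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
 *      WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
 */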
#ifdef DEBUG_SBA_INIT
        DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);

#ifdef ASSERT_PDIR_SANITY
        unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];

                       rcnt, ptr, (unsigned long long) *ptr );

sba_check_pdir(struct ioc *ioc, char *msg)
        while (rptr < rptr_end) {
                u32 pde = ((u32)((*pptr >> (63)) & 0x1));
                if ((rval & 0x1) ^ pde)
                        sba_dump_pdir_entry(ioc, msg, pide);

sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
        while (nents-- > 0) {

sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
        int the_nents = nents;

        while (the_nents-- > 0) {
                        sba_dump_sg(NULL, startsg, nents);
#define PAGES_PER_RANGE 1

#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
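/*
 * Illustration with assumed values (not taken from the code above): with
 * ioc->ibase == 0x40000000 and 16KB IOVA pages (iovp_shift == 14), PDIR
 * slot 5 plus a byte offset of 0x123 gives
 *
 *      iovp = 5 << iovp_shift;              i.e. 0x14000
 *      SBA_IOVA(ioc, iovp, 0x123)             == 0x40014123
 *      SBA_IOVP(ioc, 0x40014123)              == 0x14123
 *
 * SBA_IOVA() composes the bus address from the IOVA space base plus the
 * page-granular offset, and SBA_IOVP() strips the base off again.
 */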
#define PDIR_ENTRY_SIZE sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
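/*
 * Resource-map arithmetic, for illustration (values assumed): the bitmap is
 * scanned one longword at a time, so RESMAP_MASK(4) == 0xf builds a mask of
 * four contiguous allocation bits, and RESMAP_IDX_MASK (7 on 64-bit) rounds a
 * byte index in res_map down to the start of its longword, as in
 * (ridx & ~RESMAP_IDX_MASK) below.
 */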
get_iovp_order (unsigned long size)
        long double d = size - 1;

        order = order - iovp_shift - 0xffff + 1;
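/*
 * Rough sketch of the computation (the ia64_getf_exp() step is not shown in
 * the fragment above): converting size-1 to long double lets the hardware
 * exponent field supply floor(log2(size - 1)); subtracting the 0xffff
 * exponent bias and iovp_shift and adding 1 yields ceil(log2(size in IOVA
 * pages)).  Example: with 16KB pages (iovp_shift 14), a 40KB request gives
 * order 2, i.e. the allocator works in runs of 4 IOVA pages.
 */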
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
                                 unsigned int bitshiftcnt)
{
        return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
                + bitshiftcnt;
}
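/*
 * Example (illustration only): a res_ptr sitting 16 bytes past ioc->res_map
 * with bitshiftcnt == 5 yields pide == (16 << 3) + 5 == 133; the byte offset
 * into the bitmap is converted to a bit index and the bit position within
 * the current longword is added on top.
 */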
sba_search_bitmap(struct ioc *ioc, struct device *dev,
                  unsigned long bits_wanted, int use_hint)
        unsigned long *res_ptr;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
        unsigned long flags, pide = ~0UL, tpide;
        unsigned long boundary_size;

        ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
        ASSERT(res_ptr < res_end);

        boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
        boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

        shift = ioc->ibase >> iovp_shift;

        bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

        if (likely(bits_wanted == 1)) {
                unsigned int bitshiftcnt;
                for(; res_ptr < res_end ; res_ptr++) {
                                bitshiftcnt = ffz(*res_ptr);
                                *res_ptr |= (1UL << bitshiftcnt);
                                pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);

                unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
                unsigned long mask, base_mask;

                mask = base_mask << bitshiftcnt;

                DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
                for(; res_ptr < res_end ; res_ptr++)
                        DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        for (; mask ; mask <<= o, bitshiftcnt += o) {
                                tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                if ((0 == ((*res_ptr) & mask)) && !ret) {

                qwords = bits_wanted >> 6;

                for (; res_ptr < end; res_ptr++) {
                        tpide = ptr_to_pide(ioc, res_ptr, 0);
                                                     shift, boundary_size);
                        for (i = 0 ; i < qwords ; i++) {
                        if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
                        for (i = 0 ; i < qwords ; i++)

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        spin_unlock_irqrestore(&ioc->res_lock, flags);

sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
        unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
        unsigned long itc_start;
#endif

        ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
        itc_start = ia64_get_itc();
#endif
        pide = sba_search_bitmap(ioc, dev, pages_needed, 1);

                pide = sba_search_bitmap(ioc, dev, pages_needed, 0);

#if DELAYED_RESOURCE_CNT > 0
                        struct sba_dma_pair *d;

                                sba_mark_invalid(ioc, d->iova, d->size);
                                sba_free_range(ioc, d->iova, d->size);

                        spin_unlock_irqrestore(&ioc->saved_lock, flags);

                        pide = sba_search_bitmap(ioc, dev, pages_needed, 0);

                                      "out of mapping resources, %u %u %lx\n",
                                      pages_needed, dma_get_seg_boundary(dev));

                              "out of mapping resources, %u %u %lx\n",
                              pages_needed, dma_get_seg_boundary(dev));
#ifdef PDIR_SEARCH_TIMING
        ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
        ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif
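/*
 * Note on the sampling above: SBA_SEARCH_SAMPLE (0x100) is a power of two,
 * so "avg_idx &= SBA_SEARCH_SAMPLE - 1" is equivalent to taking avg_idx
 * modulo SBA_SEARCH_SAMPLE; avg_search[] thus acts as a ring buffer of the
 * last 256 per-page search costs in ITC cycles, which the
 * PDIR_SEARCH_TIMING /proc code further down reduces to min/avg/max.
 */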
#ifdef ASSERT_PDIR_SANITY
                sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);

        DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
                __func__, size, pages_needed, pide,

        unsigned long iovp = SBA_IOVP(ioc, iova);
        unsigned int ridx = pide >> 3;
        unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
        int bits_not_wanted = size >> iovp_shift;

        bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
        for (; bits_not_wanted > 0 ; res_ptr++) {

        DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
                bits_not_wanted, m, pide, res_ptr, *res_ptr);

        ASSERT((*res_ptr & m) == m);

#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)    \
                                                      | 0x8000000000000000ULL)

        *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
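/*
 * Sketch of the PDIR entry encoding implied by the constants above
 * (illustrative, with a made-up address): ~0xE000000000000FFFULL strips the
 * three region bits and the low 12 offset bits of the kernel virtual
 * address, and bit 63 marks the entry valid (the out-of-line variant also
 * sets the low "hint" byte), e.g.
 *
 *      sba_io_pdir_entry(pdir_ptr, 0xe000000012345678UL);
 *              entry == 0x8000000012345000UL
 */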
#ifdef ENABLE_MARK_CLEAN
mark_clean (void *addr, size_t size)
        unsigned long pg_addr, end;

        end = (unsigned long) addr + size;

sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
        ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
                        sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));

        if (byte_cnt <= iovp_size)
                ASSERT(off < ioc->pdir_size);
#ifndef FULL_VALID_PDIR
                ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
        } else {
                u32 t = get_iovp_order(byte_cnt) + iovp_shift;

                do {
#ifndef FULL_VALID_PDIR
                        ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                        ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
                        byte_cnt -= iovp_size;
                } while (byte_cnt > 0);
                               unsigned long poff, size_t size,

#ifdef ASSERT_PDIR_SANITY
#ifdef ALLOW_IOV_BYPASS

#ifdef ALLOW_IOV_BYPASS
                DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "

        size = (size + offset + ~iovp_mask) & iovp_mask;
#ifdef ASSERT_PDIR_SANITY
        if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        pide = sba_alloc_range(ioc, dev, size);
        DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

        DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
#ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        return SBA_IOVA(ioc, iovp, offset);
                            (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
#ifdef ENABLE_MARK_CLEAN
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
        if (size <= iovp_size) {
                addr = phys_to_virt(ioc->pdir_base[off] &
                                    ~0xE000000000000FFFULL);
                mark_clean(addr, size);
                        addr = phys_to_virt(ioc->pdir_base[off] &
                                            ~0xE000000000000FFFULL);
                        mark_clean(addr, min(size, iovp_size));
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
#if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
#endif
        unsigned long flags;
#ifdef ALLOW_IOV_BYPASS
                DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",

#ifdef ENABLE_MARK_CLEAN

        offset = iova & ~iovp_mask;

        DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

        size = ROUNDUP(size, iovp_size);
#ifdef ENABLE_MARK_CLEAN
        if (dir == DMA_FROM_DEVICE)
                sba_mark_clean(ioc, iova, size);
#endif
#if DELAYED_RESOURCE_CNT > 0
                        sba_mark_invalid(ioc, d->iova, d->size);
                        sba_free_range(ioc, d->iova, d->size);

        spin_unlock_irqrestore(&ioc->saved_lock, flags);

        sba_mark_invalid(ioc, iova, size);
        sba_free_range(ioc, iova, size);

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        sba_unmap_page(dev, iova, size, dir, attrs);

                page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?

#ifdef ALLOW_IOV_BYPASS
                DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",

        *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,

static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
        unsigned long dma_offset = 0;

        while (nents-- > 0) {
                int cnt = startsg->dma_length;
                startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
                        printk(" %2d : %08lx/%05x %p\n",

                        dma_offset = (unsigned long) pide & ~iovp_mask;

                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);

                                dma_sg->dma_length += cnt;

                                cnt = ROUNDUP(cnt, iovp_size);

#ifdef DEBUG_LARGE_SG_ENTRIES

#define DMA_CONTIG(__X, __Y) \
        (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
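/*
 * Worked example (values assumed): the shift by (BITS_PER_LONG - iovp_shift)
 * discards everything except the low iovp_shift bits of both addresses, so
 * the test is true only when the end of the previous chunk and the start of
 * the next both sit on an IOVA page boundary.  With 16KB IOVA pages,
 * DMA_CONTIG(0x...4000, 0x...8000) holds, DMA_CONTIG(0x...4100, 0x...8000)
 * does not.
 */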
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,

        unsigned long vcontig_len;
        unsigned long vcontig_end;
        unsigned long dma_offset, dma_len;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);

                dma_sg = vcontig_sg = startsg;
                dma_len = vcontig_len = vcontig_end = startsg->length;
                vcontig_end += vaddr;
                dma_offset = vaddr & ~iovp_mask;

                while (--nents > 0) {
                        unsigned long vaddr;

                        if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)

                        if (dma_len + startsg->length > max_seg_size)

                        if (vcontig_end == vaddr)
                        {
                                vcontig_len += startsg->length;
                                vcontig_end += startsg->length;
                                dma_len += startsg->length;
#ifdef DEBUG_LARGE_SG_ENTRIES
                        dump_run_sg = (vcontig_len > iovp_size);
#endif
                        vcontig_sg->dma_length = vcontig_len;

                        vcontig_sg = startsg;
                        vcontig_len = startsg->length;

                                vcontig_end = vcontig_len + vaddr;
                                dma_len += vcontig_len;

                vcontig_sg->dma_length = vcontig_len;
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;

                idx = sba_alloc_range(ioc, dev, dma_len);

                        dma_sg->dma_length = 0;
        int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG

        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

#ifdef ALLOW_IOV_BYPASS_SG
                        sg->dma_length = sg->length;

                sglist->dma_length = sglist->length;
#ifdef ASSERT_PDIR_SANITY
        if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check before sba_map_sg_attrs()");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
        if (coalesced < 0) {
                sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
        filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
        if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check after sba_map_sg_attrs()\n");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        ASSERT(coalesced == filled);
        DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif

#ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        while (nents && sglist->dma_length) {
                                       sglist->dma_length, dir, attrs);

        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
#ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

ioc_iova_init(struct ioc *ioc)
#ifdef FULL_VALID_PDIR
        unsigned long index;
#endif
        DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
        switch (iovp_size) {
                case  4*1024: tcnfg = 0; break;
                case  8*1024: tcnfg = 1; break;
                case 16*1024: tcnfg = 2; break;
                case 64*1024: tcnfg = 3; break;
                default:
                        panic(PFX "Unsupported IOTLB page size %ldK",
                panic(PFX "Couldn't allocate I/O Page Table\n");

        DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,

        if (agp_found && reserve_sba_gart) {

#ifdef FULL_VALID_PDIR
        if (!prefetch_spill_page) {
                char *spill_poison = "SBAIOMMU POISON";
                int poison_size = 16;
                void *poison_addr, *addr;

                        panic(PFX "Couldn't allocate PDIR spill page\n");

                for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
                        memcpy(poison_addr, spill_poison, poison_size);

        DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);

                ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);

ioc_resource_init(struct ioc *ioc)
#if DELAYED_RESOURCE_CNT > 0
                panic(PFX "Couldn't allocate resource map\n");

#ifdef ASSERT_PDIR_SANITY
#ifdef FULL_VALID_PDIR
                                                              | prefetch_spill_page);

        DBG_INIT("%s() res_map %x %p\n", __func__,
ioc_sac_init(struct ioc *ioc)
                panic(PFX "Couldn't allocate struct pci_dev");

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
                panic(PFX "Couldn't allocate struct pci_controller");

        controller->iommu = ioc;
ioc_zx1_init(struct ioc *ioc)
        unsigned long rope_config;

        if (ioc->rev < 0x20)
                panic(PFX "IOC 2.0 or later required for IOMMU support\n");

        for (i=0; i<(8*8); i+=8) {

static struct ioc * __init
ioc_init(unsigned long hpa, void *handle)
        ioc->next = ioc_list;

        for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {

        iovp_size = (1 << iovp_shift);
        iovp_mask = ~(iovp_size - 1);

        DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,

                sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
                ioc->name = "Unknown";

        ioc_resource_init(ioc);

        ia64_max_iommu_merge_mask = ~iovp_mask;

               "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
               ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
#ifdef CONFIG_PROC_FS
        for (ioc = ioc_list; ioc; ioc = ioc->next)

ioc_next(struct seq_file *s, void *v, loff_t *pos)
        struct ioc *ioc = v;

        struct ioc *ioc = v;
        unsigned long *res_ptr = (unsigned long *)ioc->res_map;

        seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
                   ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));

        seq_printf(s, "NUMA node : %d\n", ioc->node);

        seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);

        for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
#ifdef PDIR_SEARCH_TIMING
                min = max = ioc->avg_search[0];
                for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
                        avg += ioc->avg_search[i];
                        if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
                        if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
                }
                avg /= SBA_SEARCH_SAMPLE;
                seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
#ifndef ALLOW_IOV_BYPASS

        return seq_open(file, &ioc_seq_ops);

        proc_create(ioc_list->name, 0, dir, &ioc_fops);

        for (ioc = ioc_list; ioc; ioc = ioc->next)
                if (ioc->handle == handle) {

#define sba_map_ioc_to_node(ioc, handle)

acpi_sba_ioc_add(struct acpi_device *device)
        ioc = ioc_init(hpa, device->handle);

static struct acpi_driver acpi_sba_ioc_driver = {
        .name           = "IOC IOMMU Driver",
        .ids            = hp_ioc_iommu_device_ids,
                .add    = acpi_sba_ioc_add,
#if defined(CONFIG_IA64_GENERIC)
        if (is_kdump_kernel()) {
                        panic("Unable to initialize software I/O TLB:"
                              " Try machvec=dig boot option");

#ifdef CONFIG_IA64_GENERIC
                        panic("Unable to find SBA IOMMU or initialize "
                              "software I/O TLB: Try machvec=dig boot option");

                panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)

#ifdef CONFIG_PROC_FS

nosbagart(char *str)
        reserve_sba_gart = 0;
static int sba_dma_supported (struct device *dev, u64 mask)
{
        /* make sure it's at least 32bit capable */
        return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
__setup("nosbagart", nosbagart);

sba_page_override(char *str)
        switch (page_size) {
                        iovp_shift = ffs(page_size) - 1;

                        printk("%s: unknown/unsupported iommu page size %ld\n",
                               __func__, page_size);

__setup("sbapagesize=",sba_page_override);
        .alloc                  = sba_alloc_coherent,
        .free                   = sba_free_coherent,
        .map_page               = sba_map_page,
        .unmap_page             = sba_unmap_page,
        .map_sg                 = sba_map_sg_attrs,
        .unmap_sg               = sba_unmap_sg_attrs,
        .dma_supported          = sba_dma_supported,
        .mapping_error          = sba_dma_mapping_error,
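/*
 * How this table is consumed (sketch, not code from this file): on hp/zx1
 * ia64 configurations these ops are installed as the platform dma_map_ops,
 * so an ordinary driver call such as
 *
 *      dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * is dispatched to sba_map_page() above, and dma_unmap_single() to
 * sba_unmap_page(); drivers never call the sba_* entry points directly.
 */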