#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
	mmr_base = kern->ce_common->ce_pcibus.bs_base;
	mmr_offset = (unsigned long)mmr_addr - mmr_base;
	if (mmr_offset < 0x45000) {
		if (mmr_offset == 0 || mmr_offset == 0x80)
			mmr_war_offset = 0xc0;
		else if (mmr_offset == 0x148 || mmr_offset == 0x200)
			mmr_war_offset = 0x28;
		else
			mmr_war_offset = 0x158;
	mmr_base = kern->ce_common->ce_pcibus.bs_base;
	mmr_offset = (unsigned long)mmr_addr - mmr_base;

	if (mmr_offset < 0x45000) {
		if (mmr_offset == 0x100)
#define tioce_mmr_load(kern, mmrp, varp) do {\
	tioce_mmr_war_pre(kern, mmrp); \
	*(varp) = readq_relaxed(mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)
#define tioce_mmr_store(kern, mmrp, varp) do {\
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(*varp, mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)
#define tioce_mmr_storei(kern, mmrp, val) do {\
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(val, mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)
#define tioce_mmr_seti(kern, mmrp, bits) do {\
	u64 tmp; \
	tioce_mmr_load(kern, mmrp, &tmp); \
	tmp |= (bits); \
	tioce_mmr_store(kern, mmrp, &tmp); \
} while (0)
#define tioce_mmr_clri(kern, mmrp, bits) do { \
	u64 tmp; \
	tioce_mmr_load(kern, mmrp, &tmp); \
	tmp &= ~(bits); \
	tioce_mmr_store(kern, mmrp, &tmp); \
} while (0)
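/*
 * Illustrative sketch, not part of the original driver: the accessors
 * above bracket every MMR access with the pre/post CE rev-A workarounds,
 * so a read-modify-write composes as shown here.  The register pointer
 * and bit used below are hypothetical.
 */
static inline void
tioce_example_rmw(struct tioce_kernel *kern, u64 __iomem *mmr)
{
	u64 tmp;

	tioce_mmr_load(kern, mmr, &tmp);	/* WAR-wrapped read */
	tmp |= (1UL << 5);			/* hypothetical bit */
	tioce_mmr_store(kern, mmr, &tmp);	/* WAR-wrapped write */
}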
#define TIOCE_D64_MIN	0x8000000000000000UL
#define TIOCE_D64_MAX	0xffffffffffffffffUL
#define TIOCE_D64_ADDR(a)	((a) >= TIOCE_D64_MIN)

#define TIOCE_D32_MIN	0x0000000080000000UL
#define TIOCE_D32_MAX	0x00000000ffffffffUL
#define TIOCE_D32_ADDR(a)	((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX)

#define TIOCE_M32_MIN	0x0000000000000000UL
#define TIOCE_M32_MAX	0x000000007fffffffUL
#define TIOCE_M32_ADDR(a)	((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX)

#define TIOCE_M40_MIN	0x0000004000000000UL
#define TIOCE_M40_MAX	0x0000007fffffffffUL
#define TIOCE_M40_ADDR(a)	((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX)

#define TIOCE_M40S_MIN	0x0000008000000000UL
#define TIOCE_M40S_MAX	0x000000ffffffffffUL
#define TIOCE_M40S_ADDR(a)	((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX)
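/*
 * Illustrative sketch, not part of the original driver: the range
 * predicates above partition the CE bus address space, so classifying
 * an address is a simple chain of tests.  Helper name is hypothetical.
 */
static inline const char *
tioce_example_dma_mode(u64 bus_addr)
{
	if (TIOCE_D64_ADDR(bus_addr))
		return "D64";
	if (TIOCE_M40S_ADDR(bus_addr))
		return "M40S";
	if (TIOCE_M40_ADDR(bus_addr))
		return "M40";
	if (TIOCE_D32_ADDR(bus_addr))
		return "D32";
	if (TIOCE_M32_ADDR(bus_addr))
		return "M32";
	return "unmapped";
}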
#define ATE_PAGESHIFT(ps)	(__ffs(ps))
#define ATE_PAGEMASK(ps)	((ps)-1)

#define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps))
#define ATE_NPAGES(start, len, pagesize) \
	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)

#define ATE_VALID(ate)	((ate) & (1UL << 63))
#define ATE_MAKE(addr, ps, msi)	\
	(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
#define TIOCE_ATE_M32	1
#define TIOCE_ATE_M40	2
#define TIOCE_ATE_M40S	3

#define KB(x)	((u64)(x) << 10)
#define MB(x)	((u64)(x) << 20)
#define GB(x)	((u64)(x) << 30)
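/*
 * Illustrative sketch, not part of the original driver: given a buffer's
 * coretalk address and length, the ATE macros above yield the number of
 * map entries needed and build each entry.  The 64KB page size used here
 * is a hypothetical example value.
 */
static inline u64
tioce_example_first_ate(u64 ct_addr, size_t len)
{
	u64 ps = KB(64);				/* hypothetical ATE page size */
	u64 nates = ATE_NPAGES(ct_addr, len, ps);	/* entries required */

	(void)nates;			/* one ATE_MAKE() per page would follow */
	return ATE_MAKE(ct_addr, ps, 0);	/* first entry, non-MSI */
}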
static u64
tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{
	u64 bus_addr;

	bus_addr = ct_addr | (1UL << 63);	/* D64 mode bit */
	if (dma_flags & SN_DMA_MSI)
		bus_addr |= (1UL << 61);	/* MSI attribute */

	return bus_addr;
}
		u64 ct_addr, int len, int dma_flags)

	int msi_capable, msi_wanted;
	if (msi_wanted && !msi_capable)
		return 0;
	last = first + entries - nates;
	for (i = first; i <= last; i++) {

		for (j = i; j < i + nates; j++)
	for (j = 0; j < nates; j++) {

		ate = ATE_MAKE(addr, pagesize, msi_wanted);
		ate_shadow[i + j] = ate;

	map->nbytes = nates * pagesize;

	map->pci_start = bus_base + (i * pagesize);
static u64
tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)

	if (dma_flags & SN_DMA_MSI)
		return 0;

	ct_upper = ct_addr & ~0x3fffffffUL;
	ct_lower = ct_addr & 0x3fffffffUL;
	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);

	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {

	dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper);
static u64
tioce_dma_barrier(u64 bus_addr, int on)
{
	u64 barrier_bit;

	if (TIOCE_D64_ADDR(bus_addr))
		barrier_bit = (1UL << 62);
	else
		barrier_bit = (1UL << 30);

	return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit);
}
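/*
 * Illustrative sketch, not part of the original driver: the barrier
 * attribute is a single address bit, so setting and then clearing it
 * round-trips to the original bus address.  Helper name is hypothetical.
 */
static inline u64
tioce_example_barrier_roundtrip(u64 bus_addr)
{
	u64 with_barrier = tioce_dma_barrier(bus_addr, 1);	/* set */

	return tioce_dma_barrier(with_barrier, 0);	/* cleared again */
}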
	struct tioce __iomem *ce_mmr;

	bus_addr = tioce_dma_barrier(bus_addr, 0);
	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
	if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
		if (bus_addr >= map->pci_start && bus_addr <= last)
497 "%s: %s - no map found for bus_addr 0x%llx\n",
498 __func__, pci_name(pdev), bus_addr);
499 }
else if (--map->
refcnt == 0) {
	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
	if (dma_mask < 0x7fffffffUL)
		return 0;
	if (dma_mask == ~0UL) {
		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
		    ct_addr + byte_count - 1 <= last &&
	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
		if (byte_count > MB(64)) {
			map = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
					      port, ct_addr, byte_count,
					      dma_flags);
			if (!map)
				map = tioce_alloc_map(ce_kern, TIOCE_ATE_M40,
						      port, ct_addr, byte_count,
						      dma_flags);
	if (!mapaddr && dma_mask >= 0xffffffffUL)
		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
				      byte_count, dma_flags);
	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
	if (mapaddr && barrier)
		mapaddr = tioce_dma_barrier(mapaddr, 1);
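/*
 * Illustrative sketch, not part of the original driver: the 40-bit path
 * above tries "super" M40S ATEs first for requests over 64MB and regular
 * M40 ATEs first otherwise, falling back to the other type on failure.
 * A hypothetical helper expressing just the first choice:
 */
static inline int
tioce_example_m40_preference(size_t byte_count)
{
	return byte_count > MB(64) ? TIOCE_ATE_M40S : TIOCE_ATE_M40;
}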
static u64
tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count,
	  int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}
static u64
tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr,
		     size_t byte_count, int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}
static irqreturn_t
tioce_error_intr_handler(int irq, void *arg)

			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);

		panic("tioce_error_intr_handler: Fatal TIOCE error");
	int ate_index, last_ate, ps;

	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;
	while (ate_index <= last_ate) {
	seg = tioce_common->ce_pcibus.bs_persist_segment;
	bus = tioce_common->ce_pcibus.bs_persist_busnum;
	for (dev = 1; dev <= 2; dev++) {
		base = (u64)tmp << 16;

		limit = (u64)tmp << 16;

		tioce_reserve_m32(tioce_kern, base, limit);

		base |= (u64)tmp << 32;

		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;

		limit |= (u64)tmp << 32;

		tioce_reserve_m32(tioce_kern, base, limit);
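/*
 * Illustrative sketch, not part of the original driver: the bridge
 * window registers read above hold bits 31:16 of the 32-bit memory
 * base/limit, so decoding is a masked 16-bit shift.  Helper name and
 * raw-value parameters are hypothetical; PCI_MEMORY_RANGE_MASK comes
 * from <linux/pci_regs.h>.
 */
static inline void
tioce_example_decode_window(u16 base_raw, u16 limit_raw,
			    u64 *base, u64 *limit)
{
	*base = (u64)(base_raw & PCI_MEMORY_RANGE_MASK) << 16;
	/* a window limit covers through the end of its 1MB granule */
	*limit = ((u64)(limit_raw & PCI_MEMORY_RANGE_MASK) << 16) | 0xfffff;
}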
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;

	if (status & int_bit_mask) {
		u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
static void
tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;

	tioce_force_interrupt(sn_irq_info);
	struct tioce_common *tioce_common;

	tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL);

	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));

		sizeof(struct tioce_common));

	tioce_kern = tioce_kern_init(tioce_common);
	if (tioce_kern == NULL) {
		kfree(tioce_common);

			tioce_error_intr_handler,
			IRQF_SHARED, "TIOCE error", (void *)tioce_common))

		       "%s: Unable to get irq %d. "
		       "Error interrupts won't be routed for "
		       "TIOCE bus %04x:%02x\n",

		       tioce_common->ce_pcibus.bs_persist_segment,
		       tioce_common->ce_pcibus.bs_persist_busnum);

	return tioce_common;
static struct sn_pcibus_provider tioce_pci_interfaces = {
	.dma_map = tioce_dma,
	.dma_map_consistent = tioce_dma_consistent,
	.bus_fixup = tioce_bus_fixup,
	.force_interrupt = tioce_force_interrupt,
	.target_interrupt = tioce_target_interrupt
};
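/*
 * Illustrative sketch, not part of the excerpt above: in the full driver
 * an init routine registers this provider with the SN PCI core by ASIC
 * type, roughly as below.  The sn_pci_provider[] table and the
 * PCIIO_ASIC_TYPE_TIOCE id come from the SN platform headers.
 */
int
tioce_example_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
	return 0;
}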