#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
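
/* Titan chipset definitions and platform glue used by the code below. */
#include <asm/core_titan.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"


/*
 * Titan generic: the chipset supports up to 4 CPUs, and by default every
 * device IRQ may be delivered to any of them.
 */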
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
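
/* A bit is set (1) in this mask for each device IRQ that is currently enabled. */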
static unsigned long titan_cached_irq_mask;
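
/* Serialize access to the cached mask and the Titan interrupt CSRs. */
DEFINE_SPINLOCK(titan_irq_lock);

/*
 * Push the cached enable mask out to the CChip DIMx (device interrupt
 * mask) CSRs, honoring the per-CPU affinity masks.  The ISA interrupt
 * (bit 55) always follows the boot CPU.
 */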
static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	cpumask_t cpm;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	cpumask_copy(&cpm, cpu_present_mask);
	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	/* The ISA interrupt goes to the boot processor only.  */
	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
	if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
	if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
	if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read the CSRs back to make sure the writes have posted.  */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	volatile unsigned long *dimB;

	/* Only the boot processor's DIM register is programmed.  */
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}
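
/*
 * Enable a device IRQ: set its bit in the cached mask and push the new
 * mask to the hardware.  Device IRQs start at 16; 0-15 are the ISA IRQs.
 */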
static inline void
titan_enable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask |= 1UL << (irq - 16);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}
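
/* Disable a device IRQ: clear its bit in the cached mask and update the hardware. */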
static inline void
titan_disable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask &= ~(1UL << (irq - 16));
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}
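
/*
 * Record, for each of the four possible CPUs, whether the given device
 * IRQ (already offset down by 16) may be delivered to that CPU.
 */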
static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (cpumask_test_cpu(cpu, &affinity))
			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
		else
			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
	}
}
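
/* irq_chip affinity hook: update the per-CPU routing, then reprogram the DIM CSRs. */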
static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
		       bool force)
{
	unsigned int irq = d->irq;

	spin_lock(&titan_irq_lock);
	titan_cpu_set_irq_affinity(irq - 16, *affinity);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
	return 0;
}
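
/* Fallback dispatcher used when the SRM console dispatcher is not available. */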
static void
titan_device_interrupt(unsigned long vector)
{
	printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}
static void
titan_srm_device_interrupt(unsigned long vector)
{
	int irq = (vector - 0x800) >> 4;
	handle_irq(irq);
}
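
/* Attach the Titan irq_chip and a level-triggered flow handler to IRQs imin..imax. */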
static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
	long i;

	for (i = imin; i <= imax; ++i) {
		irq_set_chip_and_handler(i, ops, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}
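
/* The Titan interrupt controller as seen by the generic IRQ layer. */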
static struct irq_chip titan_irq_type = {
	.name			= "TITAN",
	.irq_unmask		= titan_enable_irq,
	.irq_mask		= titan_disable_irq,
	.irq_mask_ack		= titan_disable_irq,
	.irq_set_affinity	= titan_set_irq_affinity,
};
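
/* A do-nothing handler, hooked to the error interrupts purely so they are counted. */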
static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
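
/*
 * titan_init_irq(): pick the device-interrupt dispatcher, mask everything
 * in hardware, and register the 64 Titan device IRQs (16-79).
 */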
static void __init
titan_init_irq(void)
{
	if (alpha_using_srm && !alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_srm_device_interrupt;
	if (!alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_device_interrupt;

	titan_update_irq_hw(0);

	init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}
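
/*
 * titan_legacy_init_irq(): reset the legacy DMA controller, set up the
 * cascaded i8259s for ISA IRQs 0-15, then bring up the Titan IRQs above.
 */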
static void __init
titan_legacy_init_irq(void)
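
/*
 * From titan_dispatch_irqs(): each pending bit in the DIM summary is
 * converted to its SRM vector (0x900 + 16 * bit) and handed to the
 * platform device_interrupt hook.
 */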
	vector = 0x900 + (vector << 4);
	alpha_mv.device_interrupt(vector);
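
/* request_irq() wrapper used below: log a failure and otherwise ignore it. */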
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
		  unsigned long irqflags, const char *devname,
		  void *dev_id)
{
	int err;

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err)
		printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
		       irq, err);
}
static void __init
titan_late_init(void)
{
	titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL);
	titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL);
	titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL);
	titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL);
	titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL);

	/* Register the machine-check error handlers and check whether the
	   console left any error logs for us.  */
	titan_register_error_handlers();
	cdl_check_console_data_log();
}
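
/*
 * titan_map_irq(): the console programs each device's PCI_INTERRUPT_LINE;
 * vectors 0xE0-0xEF stay routed through ISA, everything else is offset
 * by 16 past the ISA range.
 */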
	if ((irq & 0xF0) == 0xE0)	/* explicitly routed through ISA? */
		return irq;
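
/* titan_init_pci(): run the late init, then trust the console's PCI setup and only probe. */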
	pci_set_flags(PCI_PROBE_ONLY);
	common_init_pci();
	SMC669_Init(0);
	locate_and_init_vga(NULL);
static void __init
privateer_init_pci(void)
{
	/* Hook the extra error interrupts that the common Titan code won't. */
	titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL);
	titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL);

	/* Finish with the common version. */
	return titan_init_pci();
}
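
/*
 * The system vectors.  TITAN is the generic machine vector; PRIVATEER
 * overrides the PCI init to hook its extra error interrupts.
 */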
struct alpha_machine_vector titan_mv __initmv = {
	.vector_name		= "TITAN",
	.init_irq		= titan_legacy_init_irq,
	.init_pci		= titan_init_pci,
	.pci_map_irq		= titan_map_irq,
};
ALIAS_MV(titan)
struct alpha_machine_vector privateer_mv __initmv = {
	.vector_name		= "PRIVATEER",
	.init_irq		= titan_legacy_init_irq,
	.init_pci		= privateer_init_pci,
	.pci_map_irq		= titan_map_irq,
};