12 #include <linux/stddef.h>
13 #include <linux/kernel.h>
19 #include <asm/byteorder.h>
/* Main interrupt-controller register offsets (bytes from the IC base). */
27 #define MV64X60_IC_MAIN_CAUSE_LO 0x0004
28 #define MV64X60_IC_MAIN_CAUSE_HI 0x000c
29 #define MV64X60_IC_CPU0_INTR_MASK_LO 0x0014
30 #define MV64X60_IC_CPU0_INTR_MASK_HI 0x001c
31 #define MV64X60_IC_CPU0_SELECT_CAUSE 0x0024
/* Bits in the select-cause register: GPP group bits, and the flag that
 * says the "high" half is being reported — names suggest this; confirm
 * against the MV64x60 datasheet. */
33 #define MV64X60_HIGH_GPP_GROUPS 0x0f000000
34 #define MV64X60_SELECT_CAUSE_HIGH 0x40000000
/* GPP (general-purpose port) register offsets (bytes from the GPP base). */
37 #define MV64x60_GPP_INTR_CAUSE 0x0008
38 #define MV64x60_GPP_INTR_MASK 0x000c
/* Level-1 group selectors encoded in hwirq bits [6:5] (see MASK/OFFSET):
 * 0 = main-cause low word, 1 = main-cause high word, 2 = GPP. */
40 #define MV64x60_LEVEL1_LOW 0
41 #define MV64x60_LEVEL1_HIGH 1
42 #define MV64x60_LEVEL1_GPP 2
44 #define MV64x60_LEVEL1_MASK 0x00000060
45 #define MV64x60_LEVEL1_OFFSET 5
/* Level-2 selector: hwirq bits [4:0] = bit index within the group. */
47 #define MV64x60_LEVEL2_MASK 0x0000001f
/* 3 level-1 groups x 32 level-2 sources each. */
49 #define MV64x60_NUM_IRQS 96
/* MMIO bases for the interrupt-controller and GPP register windows,
 * established via ioremap() during init (see below). */
53 static void __iomem *mv64x60_irq_reg_base;
54 static void __iomem *mv64x60_gpp_reg_base;
/* Software shadows of the hardware interrupt-mask registers; modified
 * under mv64x60_lock and then written back to the controller. */
69 static u32 mv64x60_cached_low_mask;
71 static u32 mv64x60_cached_gpp_mask;
/*
 * irq_chip .irq_mask/.irq_mask_ack hook for the "low" group: disable one
 * interrupt source by clearing its bit in the cached low mask, with the
 * update done under mv64x60_lock.
 */
79 static void mv64x60_mask_low(
struct irq_data *
d)
/* level2 = bit index within the group — presumably hwirq & MV64x60_LEVEL2_MASK;
 * computation not shown here. */
85 mv64x60_cached_low_mask &= ~(1 << level2);
87 mv64x60_cached_low_mask);
88 spin_unlock_irqrestore(&mv64x60_lock, flags);
/*
 * irq_chip .irq_unmask hook for the "low" group: re-enable one interrupt
 * source by setting its bit in the cached low mask, under mv64x60_lock.
 */
92 static void mv64x60_unmask_low(
struct irq_data *d)
98 mv64x60_cached_low_mask |= 1 << level2;
100 mv64x60_cached_low_mask);
101 spin_unlock_irqrestore(&mv64x60_lock, flags);
/* irq_chip callbacks for the "low" level-1 group. */
105 static struct irq_chip mv64x60_chip_low = {
106 .name =
"mv64x60_low",
107 .irq_mask = mv64x60_mask_low,
108 .irq_mask_ack = mv64x60_mask_low,	/* mask doubles as mask+ack */
109 .irq_unmask = mv64x60_unmask_low,
/*
 * irq_chip .irq_mask/.irq_mask_ack hook for the "high" group: clear the
 * source's bit in the cached high mask, under mv64x60_lock.
 */
116 static void mv64x60_mask_high(
struct irq_data *d)
122 mv64x60_cached_high_mask &= ~(1 << level2);
124 mv64x60_cached_high_mask);
125 spin_unlock_irqrestore(&mv64x60_lock, flags);
/*
 * irq_chip .irq_unmask hook for the "high" group: set the source's bit
 * in the cached high mask, under mv64x60_lock.
 */
129 static void mv64x60_unmask_high(
struct irq_data *d)
135 mv64x60_cached_high_mask |= 1 << level2;
137 mv64x60_cached_high_mask);
138 spin_unlock_irqrestore(&mv64x60_lock, flags);
/* irq_chip callbacks for the "high" level-1 group. */
142 static struct irq_chip mv64x60_chip_high = {
143 .name =
"mv64x60_high",
144 .irq_mask = mv64x60_mask_high,
145 .irq_mask_ack = mv64x60_mask_high,	/* mask doubles as mask+ack */
146 .irq_unmask = mv64x60_unmask_high,
/*
 * irq_chip .irq_mask hook for GPP interrupts: clear the source's bit in
 * the cached GPP mask, under mv64x60_lock.
 */
153 static void mv64x60_mask_gpp(
struct irq_data *d)
159 mv64x60_cached_gpp_mask &= ~(1 << level2);
161 mv64x60_cached_gpp_mask);
162 spin_unlock_irqrestore(&mv64x60_lock, flags);
/*
 * irq_chip .irq_mask_ack hook for GPP interrupts. GPP is the only group
 * with a separate mask_ack handler; besides clearing the mask bit it
 * presumably also acks the GPP cause register (lines 175-176 not shown
 * here — confirm against MV64x60_GPP_INTR_CAUSE usage).
 */
166 static void mv64x60_mask_ack_gpp(
struct irq_data *d)
172 mv64x60_cached_gpp_mask &= ~(1 << level2);
174 mv64x60_cached_gpp_mask);
177 spin_unlock_irqrestore(&mv64x60_lock, flags);
/*
 * irq_chip .irq_unmask hook for GPP interrupts: set the source's bit in
 * the cached GPP mask, under mv64x60_lock.
 */
181 static void mv64x60_unmask_gpp(
struct irq_data *d)
187 mv64x60_cached_gpp_mask |= 1 << level2;
189 mv64x60_cached_gpp_mask);
190 spin_unlock_irqrestore(&mv64x60_lock, flags);
/* irq_chip callbacks for the GPP level-1 group (separate mask_ack). */
194 static struct irq_chip mv64x60_chip_gpp = {
195 .name =
"mv64x60_gpp",
196 .irq_mask = mv64x60_mask_gpp,
197 .irq_mask_ack = mv64x60_mask_ack_gpp,
198 .irq_unmask = mv64x60_unmask_gpp,
/* Per-level-1-group chip table, indexed by the MV64x60_LEVEL1_* value
 * (presumably low, high, gpp in that order — initializer not shown). */
205 static struct irq_chip *mv64x60_chips[] = {
/*
 * irq_domain .map hook: select the irq_chip for the hwirq's level-1
 * group (index level1 into mv64x60_chips) and bind it to the virq.
 */
211 static int mv64x60_host_map(
struct irq_domain *
h,
unsigned int virq,
220 irq_set_chip_and_handler(virq, mv64x60_chips[level1],
/* Registered as the domain's map callback. */
227 .map = mv64x60_host_map,
/* Device-tree "reg" property; reg[1] is the size passed to ioremap(). */
239 const unsigned int *
reg;
/* Map the GPP and main interrupt-controller register windows. */
245 mv64x60_gpp_reg_base =
ioremap(paddr, reg[1]);
251 mv64x60_irq_reg_base =
ioremap(paddr, reg[1]);
/* Trailing arguments of the irq_domain registration (full call not shown). */
254 &mv64x60_host_ops,
NULL);
/* Write the initial cached masks out to the hardware, under the lock. */
258 mv64x60_cached_gpp_mask);
260 mv64x60_cached_low_mask);
262 mv64x60_cached_high_mask);
267 spin_unlock_irqrestore(&mv64x60_lock, flags);
/* Only consider sources that are currently unmasked in the high group. */
279 cause &= mv64x60_cached_high_mask;
/* Read the raw GPP cause bits from the GPP register window. */
282 cause =
in_le32(mv64x60_gpp_reg_base +
284 cause &= mv64x60_cached_gpp_mask;
/* Likewise filter the low-group cause through its cached mask. */
288 cause &= mv64x60_cached_low_mask;