Linux Kernel 3.7.1
irq.c
/*
 * linux/arch/arm/mach-at91/irq.c
 *
 * Copyright (C) 2004 SAN People
 * Copyright (C) 2004 ATMEL
 * Copyright (C) Rick Bronson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/setup.h>

#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>

#include <mach/at91_aic.h>

void __iomem *at91_aic_base;
static struct irq_domain *at91_aic_domain;
static struct device_node *at91_aic_np;
static unsigned int n_irqs = NR_AIC_IRQS;
static unsigned long at91_aic_caps = 0;

/* AIC5 introduces a Source Select Register */
#define AT91_AIC_CAP_AIC5       (1 << 0)
#define has_aic5()              (at91_aic_caps & AT91_AIC_CAP_AIC5)

#ifdef CONFIG_PM

static unsigned long *wakeups;
static unsigned long *backups;

#define set_backup(bit) set_bit(bit, backups)
#define clear_backup(bit) clear_bit(bit, backups)

static int at91_aic_pm_init(void)
{
        backups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
        if (!backups)
                return -ENOMEM;

        wakeups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
        if (!wakeups) {
                kfree(backups);
                return -ENOMEM;
        }

        return 0;
}

static int at91_aic_set_wake(struct irq_data *d, unsigned value)
{
        if (unlikely(d->hwirq >= n_irqs))
                return -EINVAL;

        if (value)
                set_bit(d->hwirq, wakeups);
        else
                clear_bit(d->hwirq, wakeups);

        return 0;
}

void at91_irq_suspend(void)
{
        int i = 0, bit;

        if (has_aic5()) {
                /* disable enabled irqs */
                while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IDCR, 1);
                        i = bit;
                }
                /* enable wakeup irqs */
                i = 0;
                while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IECR, 1);
                        i = bit;
                }
        } else {
                at91_aic_write(AT91_AIC_IDCR, *backups);
                at91_aic_write(AT91_AIC_IECR, *wakeups);
        }
}

void at91_irq_resume(void)
{
        int i = 0, bit;

        if (has_aic5()) {
                /* disable wakeup irqs */
                while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IDCR, 1);
                        i = bit;
                }
                /* enable irqs disabled for suspend */
                i = 0;
                while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IECR, 1);
                        i = bit;
                }
        } else {
                at91_aic_write(AT91_AIC_IDCR, *wakeups);
                at91_aic_write(AT91_AIC_IECR, *backups);
        }
}

#else
static inline int at91_aic_pm_init(void)
{
        return 0;
}

#define set_backup(bit)
#define clear_backup(bit)
#define at91_aic_set_wake NULL

#endif /* CONFIG_PM */
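
/*
 * Illustration only; not part of the original irq.c.  A device driver that
 * wants one of these AIC lines to wake the system marks it with
 * enable_irq_wake(); the generic IRQ core forwards that to the irq_chip's
 * .irq_set_wake hook (at91_aic_set_wake() above), which records the line in
 * the "wakeups" bitmap consulted by at91_irq_suspend()/at91_irq_resume().
 * The helpers and "wake_irq" below are hypothetical; enable_irq_wake() and
 * disable_irq_wake() are declared in <linux/interrupt.h>.
 */
#if 0
static int example_arm_wake_irq(unsigned int wake_irq)
{
        /* Keep this interrupt armed across suspend so it can wake us. */
        return enable_irq_wake(wake_irq);
}

static int example_disarm_wake_irq(unsigned int wake_irq)
{
        /* Balance the earlier enable_irq_wake(). */
        return disable_irq_wake(wake_irq);
}
#endif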

asmlinkage void __exception_irq_entry
at91_aic_handle_irq(struct pt_regs *regs)
{
        u32 irqnr;
        u32 irqstat;

        irqnr = at91_aic_read(AT91_AIC_IVR);
        irqstat = at91_aic_read(AT91_AIC_ISR);

        /*
         * ISR value is 0 when there is no current interrupt or when there is
         * a spurious interrupt
         */
        if (!irqstat)
                at91_aic_write(AT91_AIC_EOICR, 0);
        else
                handle_IRQ(irqnr, regs);
}

asmlinkage void __exception_irq_entry
at91_aic5_handle_irq(struct pt_regs *regs)
{
        u32 irqnr;
        u32 irqstat;

        irqnr = at91_aic_read(AT91_AIC5_IVR);
        irqstat = at91_aic_read(AT91_AIC5_ISR);

        if (!irqstat)
                at91_aic_write(AT91_AIC5_EOICR, 0);
        else
                handle_IRQ(irqnr, regs);
}

static void at91_aic_mask_irq(struct irq_data *d)
{
        /* Disable interrupt on AIC */
        at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq);
        /* Update ISR cache */
        clear_backup(d->hwirq);
}

static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d)
{
        /* Disable interrupt on AIC5 */
        at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
        at91_aic_write(AT91_AIC5_IDCR, 1);
        /* Update ISR cache */
        clear_backup(d->hwirq);
}

static void at91_aic_unmask_irq(struct irq_data *d)
{
        /* Enable interrupt on AIC */
        at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq);
        /* Update ISR cache */
        set_backup(d->hwirq);
}

static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d)
{
        /* Enable interrupt on AIC5 */
        at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
        at91_aic_write(AT91_AIC5_IECR, 1);
        /* Update ISR cache */
        set_backup(d->hwirq);
}

static void at91_aic_eoi(struct irq_data *d)
{
        /*
         * Mark end-of-interrupt on AIC, the controller doesn't care about
         * the value written. Moreover it's a write-only register.
         */
        at91_aic_write(AT91_AIC_EOICR, 0);
}

static void __maybe_unused at91_aic5_eoi(struct irq_data *d)
{
        at91_aic_write(AT91_AIC5_EOICR, 0);
}

unsigned long *at91_extern_irq;

#define is_extern_irq(hwirq) test_bit(hwirq, at91_extern_irq)

static int at91_aic_compute_srctype(struct irq_data *d, unsigned type)
{
        int srctype;

        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
                srctype = AT91_AIC_SRCTYPE_HIGH;
                break;
        case IRQ_TYPE_EDGE_RISING:
                srctype = AT91_AIC_SRCTYPE_RISING;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                /* only supported on external interrupts */
                if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))
                        srctype = AT91_AIC_SRCTYPE_LOW;
                else
                        srctype = -EINVAL;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                /* only supported on external interrupts */
                if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))
                        srctype = AT91_AIC_SRCTYPE_FALLING;
                else
                        srctype = -EINVAL;
                break;
        default:
                srctype = -EINVAL;
        }

        return srctype;
}

static int at91_aic_set_type(struct irq_data *d, unsigned type)
{
        unsigned int smr;
        int srctype;

        srctype = at91_aic_compute_srctype(d, type);
        if (srctype < 0)
                return srctype;

        if (has_aic5()) {
                at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
                smr = at91_aic_read(AT91_AIC5_SMR)
                      & ~AT91_AIC_SRCTYPE;
                at91_aic_write(AT91_AIC5_SMR, smr | srctype);
        } else {
                smr = at91_aic_read(AT91_AIC_SMR(d->hwirq))
                      & ~AT91_AIC_SRCTYPE;
                at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
        }

        return 0;
}

static struct irq_chip at91_aic_chip = {
        .name           = "AIC",
        .irq_mask       = at91_aic_mask_irq,
        .irq_unmask     = at91_aic_unmask_irq,
        .irq_set_type   = at91_aic_set_type,
        .irq_set_wake   = at91_aic_set_wake,
        .irq_eoi        = at91_aic_eoi,
};
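
/*
 * Illustration only; not part of the original irq.c.  A trigger type
 * requested through the generic IRQ API reaches .irq_set_type, i.e.
 * at91_aic_set_type() above.  Per at91_aic_compute_srctype(), low-level and
 * falling-edge triggers are only accepted for the FIQ line or for lines set
 * in at91_extern_irq; internal peripheral interrupts must use high-level or
 * rising-edge.  The handler and irq number below are hypothetical;
 * request_irq() and the IRQF_TRIGGER_* flags come from <linux/interrupt.h>.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_request(unsigned int ext_irq, void *dev_id)
{
        /* Falling edge is only honoured on FIQ or an external interrupt. */
        return request_irq(ext_irq, example_isr, IRQF_TRIGGER_FALLING,
                           "example", dev_id);
}
#endif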

static void __init at91_aic_hw_init(unsigned int spu_vector)
{
        int i;

        /*
         * Perform 8 End Of Interrupt Command to make sure AIC
         * will not Lock out nIRQ
         */
        for (i = 0; i < 8; i++)
                at91_aic_write(AT91_AIC_EOICR, 0);

        /*
         * Spurious Interrupt ID in Spurious Vector Register.
         * When there is no current interrupt, the IRQ Vector Register
         * reads the value stored in AIC_SPU
         */
        at91_aic_write(AT91_AIC_SPU, spu_vector);

        /* No debugging in AIC: Debug (Protect) Control Register */
        at91_aic_write(AT91_AIC_DCR, 0);

        /* Disable and clear all interrupts initially */
        at91_aic_write(AT91_AIC_IDCR, 0xFFFFFFFF);
        at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}

static void __init __maybe_unused at91_aic5_hw_init(unsigned int spu_vector)
{
        int i;

        /*
         * Perform 8 End Of Interrupt Command to make sure AIC
         * will not Lock out nIRQ
         */
        for (i = 0; i < 8; i++)
                at91_aic_write(AT91_AIC5_EOICR, 0);

        /*
         * Spurious Interrupt ID in Spurious Vector Register.
         * When there is no current interrupt, the IRQ Vector Register
         * reads the value stored in AIC_SPU
         */
        at91_aic_write(AT91_AIC5_SPU, spu_vector);

        /* No debugging in AIC: Debug (Protect) Control Register */
        at91_aic_write(AT91_AIC5_DCR, 0);

        /* Disable and clear all interrupts initially */
        for (i = 0; i < n_irqs; i++) {
                at91_aic_write(AT91_AIC5_SSR, i & AT91_AIC5_INTSEL_MSK);
                at91_aic_write(AT91_AIC5_IDCR, 1);
                at91_aic_write(AT91_AIC5_ICCR, 1);
        }
}

#if defined(CONFIG_OF)
static unsigned int *at91_aic_irq_priorities;

static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq,
                            irq_hw_number_t hw)
{
        /* Put virq number in Source Vector Register */
        at91_aic_write(AT91_AIC_SVR(hw), virq);

        /* Active Low interrupt, with priority */
        at91_aic_write(AT91_AIC_SMR(hw),
                       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

        irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static int at91_aic5_irq_map(struct irq_domain *h, unsigned int virq,
                             irq_hw_number_t hw)
{
        at91_aic_write(AT91_AIC5_SSR, hw & AT91_AIC5_INTSEL_MSK);

        /* Put virq number in Source Vector Register */
        at91_aic_write(AT91_AIC5_SVR, virq);

        /* Active Low interrupt, with priority */
        at91_aic_write(AT91_AIC5_SMR,
                       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

        irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static int at91_aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
                                     const u32 *intspec, unsigned int intsize,
                                     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 3))
                return -EINVAL;
        if (WARN_ON(intspec[0] >= n_irqs))
                return -EINVAL;
        if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY)
                    || (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
                return -EINVAL;

        *out_hwirq = intspec[0];
        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        at91_aic_irq_priorities[*out_hwirq] = intspec[2];

        return 0;
}

static struct irq_domain_ops at91_aic_irq_ops = {
        .map    = at91_aic_irq_map,
        .xlate  = at91_aic_irq_domain_xlate,
};

int __init at91_aic_of_common_init(struct device_node *node,
                                   struct device_node *parent)
{
        struct property *prop;
        const __be32 *p;
        u32 val;

        at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
                                  * sizeof(*at91_extern_irq), GFP_KERNEL);
        if (!at91_extern_irq)
                return -ENOMEM;

        if (at91_aic_pm_init()) {
                kfree(at91_extern_irq);
                return -ENOMEM;
        }

        at91_aic_irq_priorities = kzalloc(n_irqs
                                          * sizeof(*at91_aic_irq_priorities),
                                          GFP_KERNEL);
        if (!at91_aic_irq_priorities)
                return -ENOMEM;

        at91_aic_base = of_iomap(node, 0);
        at91_aic_np = node;

        at91_aic_domain = irq_domain_add_linear(at91_aic_np, n_irqs,
                                                &at91_aic_irq_ops, NULL);
        if (!at91_aic_domain)
                panic("Unable to add AIC irq domain (DT)\n");

        of_property_for_each_u32(node, "atmel,external-irqs", prop, p, val) {
                if (val >= n_irqs)
                        pr_warn("AIC: external irq %d >= %d skip it\n",
                                val, n_irqs);
                else
                        set_bit(val, at91_extern_irq);
        }

        irq_set_default_host(at91_aic_domain);

        return 0;
}

int __init at91_aic_of_init(struct device_node *node,
                            struct device_node *parent)
{
        int err;

        err = at91_aic_of_common_init(node, parent);
        if (err)
                return err;

        at91_aic_hw_init(n_irqs);

        return 0;
}

int __init at91_aic5_of_init(struct device_node *node,
                             struct device_node *parent)
{
        int err;

        at91_aic_caps |= AT91_AIC_CAP_AIC5;
        n_irqs = NR_AIC5_IRQS;
        at91_aic_chip.irq_ack = at91_aic5_mask_irq;
        at91_aic_chip.irq_mask = at91_aic5_mask_irq;
        at91_aic_chip.irq_unmask = at91_aic5_unmask_irq;
        at91_aic_chip.irq_eoi = at91_aic5_eoi;
        at91_aic_irq_ops.map = at91_aic5_irq_map;

        err = at91_aic_of_common_init(node, parent);
        if (err)
                return err;

        at91_aic5_hw_init(n_irqs);

        return 0;
}
#endif
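
/*
 * Illustration only; not part of the original irq.c.  With the DT probe path
 * above, a consumer node references the AIC with a three-cell specifier that
 * at91_aic_irq_domain_xlate() decodes as <hwirq type priority>, e.g.:
 *
 *      interrupt-parent = <&aic>;
 *      interrupts = <12 4 5>;   (hwirq 12, IRQ_TYPE_LEVEL_HIGH = 4, priority 5)
 *
 * A driver then resolves and requests the line with the usual helpers; the
 * mapping itself is created by at91_aic_irq_map() (or at91_aic5_irq_map()).
 * The node pointer and handler below are hypothetical; irq_of_parse_and_map()
 * is declared in <linux/of_irq.h> and request_irq() in <linux/interrupt.h>.
 */
#if 0
static irqreturn_t example_dt_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_dt_probe(struct device_node *np)
{
        unsigned int virq;

        /* Decodes the <hwirq type priority> cells through the .xlate op and
         * creates the linear-domain mapping through the .map op. */
        virq = irq_of_parse_and_map(np, 0);
        if (!virq)
                return -EINVAL;

        return request_irq(virq, example_dt_isr, 0, "example-dt", NULL);
}
#endif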

/*
 * Initialize the AIC interrupt controller.
 */
void __init at91_aic_init(unsigned int *priority, unsigned int ext_irq_mask)
{
        unsigned int i;
        int irq_base;

        at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
                                  * sizeof(*at91_extern_irq), GFP_KERNEL);

        if (at91_aic_pm_init() || at91_extern_irq == NULL)
                panic("Unable to allocate bit maps\n");

        *at91_extern_irq = ext_irq_mask;

        at91_aic_base = ioremap(AT91_AIC, 512);
        if (!at91_aic_base)
                panic("Unable to ioremap AIC registers\n");

        /* Add irq domain for AIC */
        irq_base = irq_alloc_descs(-1, 0, n_irqs, 0);
        if (irq_base < 0) {
                WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
                irq_base = 0;
        }
        at91_aic_domain = irq_domain_add_legacy(at91_aic_np, n_irqs,
                                                irq_base, 0,
                                                &irq_domain_simple_ops, NULL);

        if (!at91_aic_domain)
                panic("Unable to add AIC irq domain\n");

        irq_set_default_host(at91_aic_domain);

        /*
         * The IVR is used by macro get_irqnr_and_base to read and verify.
         * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
         */
        for (i = 0; i < n_irqs; i++) {
                /* Put hardware irq number in Source Vector Register: */
                at91_aic_write(AT91_AIC_SVR(i), NR_IRQS_LEGACY + i);
                /* Active Low interrupt, with the specified priority */
                at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
                irq_set_chip_and_handler(NR_IRQS_LEGACY + i, &at91_aic_chip, handle_fasteoi_irq);
                set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
        }

        at91_aic_hw_init(n_irqs);
}