Linux Kernel 3.7.1
irq.c
/*
 * linux/arch/arm/mach-mmp/irq.c
 *
 * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
 * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
 *
 * Author:	Bin Yang <[email protected]>
 *		Haojian Zhuang <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <mach/irqs.h>

#ifdef CONFIG_CPU_MMP2
#include <mach/pm-mmp2.h>
#endif
#ifdef CONFIG_CPU_PXA910
#include <mach/pm-pxa910.h>
#endif

#include "common.h"

#define MAX_ICU_NR	16

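/*
 * Per-controller state: icu_data[0] describes the main ICU, while
 * icu_data[1..max_icu_nr-1] describe the cascaded "mux" interrupt
 * controllers, each of which has its own status/mask register pair.
 */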
struct icu_chip_data {
	int			nr_irqs;
	unsigned int		virq_base;
	unsigned int		cascade_irq;
	void __iomem		*reg_status;
	void __iomem		*reg_mask;
	unsigned int		conf_enable;
	unsigned int		conf_disable;
	unsigned int		conf_mask;
	unsigned int		clr_mfp_irq_base;
	unsigned int		clr_mfp_hwirq;
	struct irq_domain	*domain;
};

struct mmp_intc_conf {
	unsigned int	conf_enable;
	unsigned int	conf_disable;
	unsigned int	conf_mask;
};

void __iomem *mmp_icu_base;
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;

extern void mmp2_clear_pmic_int(void);

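/*
 * Mask/ack/unmask callbacks.  The main ICU (icu_data[0]) has one
 * configuration register per interrupt at mmp_icu_base + (hwirq << 2);
 * the cascaded mux controllers instead use a single shared mask
 * register with one bit per interrupt.
 */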
static void icu_mask_ack_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
#ifdef CONFIG_CPU_MMP2
		if ((data->virq_base == data->clr_mfp_irq_base)
			&& (hwirq == data->clr_mfp_hwirq))
			mmp2_clear_pmic_int();
#endif
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_mask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_unmask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_enable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
		r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

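/* irq_chip shared by the main ICU and all cascaded mux controllers. */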
static struct irq_chip icu_irq_chip = {
	.name		= "icu_irq",
	.irq_mask	= icu_mask_irq,
	.irq_mask_ack	= icu_mask_ack_irq,
	.irq_unmask	= icu_unmask_irq,
};

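/*
 * Chained handler for the cascade interrupts: find the mux controller
 * that owns this cascade IRQ, then dispatch every pending, unmasked
 * bit in its status register via generic_handle_irq().
 */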
static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain;
	struct icu_chip_data *data;
	int i;
	unsigned long mask, status, n;

	for (i = 1; i < max_icu_nr; i++) {
		if (irq == icu_data[i].cascade_irq) {
			domain = icu_data[i].domain;
			data = (struct icu_chip_data *)domain->host_data;
			break;
		}
	}
	if (i >= max_icu_nr) {
		pr_err("Spurious irq %d in MMP INTC\n", irq);
		return;
	}

	mask = readl_relaxed(data->reg_mask);
	while (1) {
		status = readl_relaxed(data->reg_status) & ~mask;
		if (status == 0)
			break;
		for_each_set_bit(n, &status, BITS_PER_LONG) {
			generic_handle_irq(icu_data[i].virq_base + n);
		}
	}
}

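/* irq_domain callbacks, registered by the device-tree init paths below. */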
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);
	return 0;
}

static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	return 0;
}

static const struct irq_domain_ops mmp_irq_domain_ops = {
	.map		= mmp_irq_domain_map,
	.xlate		= mmp_irq_domain_xlate,
};

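/*
 * Per-SoC values written into each interrupt's ICU configuration
 * register: conf_mask selects the bits owned by this driver, and
 * conf_enable/conf_disable are the patterns used to enable or disable
 * the line (they differ between the ARMv5 MMP ICU and the MMP2 ICU).
 */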
static struct mmp_intc_conf mmp_conf = {
	.conf_enable	= 0x51,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};

static struct mmp_intc_conf mmp2_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};

/* MMP (ARMv5) */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_PXA910
	icu_irq_chip.irq_set_wake = pxa910_set_wake;
#endif
}

/* MMP2 (ARMv7) */
void __init mmp2_init_icu(void)
{
	int irq;

	max_icu_nr = 8;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
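	/*
	 * Cascaded mux interrupt controllers (PMIC, RTC, keypad, TWSI,
	 * misc, MIPI HSI1/HSI0), each with its own status/mask register
	 * pair and a fixed cascade interrupt on the main ICU.
	 */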
	icu_data[1].reg_status = mmp_icu_base + 0x150;
	icu_data[1].reg_mask = mmp_icu_base + 0x168;
	icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
	icu_data[1].nr_irqs = 2;
	icu_data[1].cascade_irq = 4;
	icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
						   icu_data[1].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[1]);
	icu_data[2].reg_status = mmp_icu_base + 0x154;
	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
	icu_data[2].nr_irqs = 2;
	icu_data[2].cascade_irq = 5;
	icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
						   icu_data[2].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[2]);
	icu_data[3].reg_status = mmp_icu_base + 0x180;
	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
	icu_data[3].nr_irqs = 3;
	icu_data[3].cascade_irq = 9;
	icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
						   icu_data[3].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[3]);
	icu_data[4].reg_status = mmp_icu_base + 0x158;
	icu_data[4].reg_mask = mmp_icu_base + 0x170;
	icu_data[4].nr_irqs = 5;
	icu_data[4].cascade_irq = 17;
	icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
						   icu_data[4].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[4]);
	icu_data[5].reg_status = mmp_icu_base + 0x15c;
	icu_data[5].reg_mask = mmp_icu_base + 0x174;
	icu_data[5].nr_irqs = 15;
	icu_data[5].cascade_irq = 35;
	icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
						   icu_data[5].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[5]);
	icu_data[6].reg_status = mmp_icu_base + 0x160;
	icu_data[6].reg_mask = mmp_icu_base + 0x178;
	icu_data[6].nr_irqs = 2;
	icu_data[6].cascade_irq = 51;
	icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
						   icu_data[6].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[6]);
	icu_data[7].reg_status = mmp_icu_base + 0x188;
	icu_data[7].reg_mask = mmp_icu_base + 0x184;
	icu_data[7].nr_irqs = 2;
	icu_data[7].cascade_irq = 55;
	icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
						   icu_data[7].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[7]);
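	/*
	 * Mask everything, install the chained demux handler on the
	 * cascade interrupts and a plain level handler on the rest.
	 */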
	for (irq = 0; irq < IRQ_MMP2_MUX_END; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		switch (irq) {
		case IRQ_MMP2_PMIC_MUX:
		case IRQ_MMP2_RTC_MUX:
		case IRQ_MMP2_KEYPAD_MUX:
		case IRQ_MMP2_TWSI_MUX:
		case IRQ_MMP2_MISC_MUX:
		case IRQ_MMP2_MIPI_HSI1_MUX:
		case IRQ_MMP2_MIPI_HSI0_MUX:
			irq_set_chip(irq, &icu_irq_chip);
			irq_set_chained_handler(irq, icu_mux_irq_demux);
			break;
		default:
			irq_set_chip_and_handler(irq, &icu_irq_chip,
						 handle_level_irq);
			break;
		}
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_MMP2
	icu_irq_chip.irq_set_wake = mmp2_set_wake;
#endif
}

#ifdef CONFIG_OF
static const struct of_device_id intc_ids[] __initconst = {
	{ .compatible = "mrvl,mmp-intc", .data = &mmp_conf },
	{ .compatible = "mrvl,mmp2-intc", .data = &mmp2_conf },
	{}
};

static const struct of_device_id mmp_mux_irq_match[] __initconst = {
	{ .compatible = "mrvl,mmp2-mux-intc" },
	{}
};

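/*
 * Device-tree setup of the cascaded mux controllers: each matching node
 * supplies "mrvl,intc-nr-irqs", two reg cells (status and mask register
 * offsets from the main ICU base), its cascade interrupt and, optionally,
 * "mrvl,clr-mfp-irq" for the PMIC interrupt clearing done in
 * icu_mask_ack_irq().
 */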
int __init mmp2_mux_init(struct device_node *parent)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct resource res;
	int i, irq_base, ret, irq;
	u32 nr_irqs, mfp_irq;

	node = parent;
	max_icu_nr = 1;
	for (i = 1; i < MAX_ICU_NR; i++) {
		node = of_find_matching_node(node, mmp_mux_irq_match);
		if (!node)
			break;
		of_id = of_match_node(&mmp_mux_irq_match[0], node);
		ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
					   &nr_irqs);
		if (ret) {
			pr_err("Not found mrvl,intc-nr-irqs property\n");
			ret = -EINVAL;
			goto err;
		}
		ret = of_address_to_resource(node, 0, &res);
		if (ret < 0) {
			pr_err("Not found reg property\n");
			ret = -EINVAL;
			goto err;
		}
		icu_data[i].reg_status = mmp_icu_base + res.start;
		ret = of_address_to_resource(node, 1, &res);
		if (ret < 0) {
			pr_err("Not found reg property\n");
			ret = -EINVAL;
			goto err;
		}
		icu_data[i].reg_mask = mmp_icu_base + res.start;
		icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
		if (!icu_data[i].cascade_irq) {
			ret = -EINVAL;
			goto err;
		}

		irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
		if (irq_base < 0) {
			pr_err("Failed to allocate IRQ numbers for mux intc\n");
			ret = irq_base;
			goto err;
		}
		if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
					  &mfp_irq)) {
			icu_data[i].clr_mfp_irq_base = irq_base;
			icu_data[i].clr_mfp_hwirq = mfp_irq;
		}
		irq_set_chained_handler(icu_data[i].cascade_irq,
					icu_mux_irq_demux);
		icu_data[i].nr_irqs = nr_irqs;
		icu_data[i].virq_base = irq_base;
		icu_data[i].domain = irq_domain_add_legacy(node, nr_irqs,
							   irq_base, 0,
							   &mmp_irq_domain_ops,
							   &icu_data[i]);
		for (irq = irq_base; irq < irq_base + nr_irqs; irq++)
			icu_mask_irq(irq_get_irq_data(irq));
	}
	max_icu_nr = i;
	return 0;
err:
	of_node_put(node);
	max_icu_nr = i;
	return ret;
}

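/*
 * Device-tree entry point: probe the main "mrvl,mmp-intc"/"mrvl,mmp2-intc"
 * node, map its registers, set up the primary legacy domain and then probe
 * any mux sub-controllers via mmp2_mux_init().
 */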
void __init mmp_dt_irq_init(void)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct mmp_intc_conf *conf;
	int nr_irqs, irq_base, ret, irq;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-mmp\n");
		return;
	}
	of_id = of_match_node(intc_ids, node);
	conf = of_id->data;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return;
	}

	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
	if (irq_base < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		goto err;
	} else if (irq_base != NR_IRQS_LEGACY) {
		pr_err("ICU's irqbase should be started from 0\n");
		goto err;
	}
	icu_data[0].conf_enable = conf->conf_enable;
	icu_data[0].conf_disable = conf->conf_disable;
	icu_data[0].conf_mask = conf->conf_mask;
	icu_data[0].nr_irqs = nr_irqs;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	irq_set_default_host(icu_data[0].domain);
	for (irq = 0; irq < nr_irqs; irq++)
		icu_mask_irq(irq_get_irq_data(irq));
	mmp2_mux_init(node);
	return;
err:
	iounmap(mmp_icu_base);
}
#endif