Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
xics-common.c
Go to the documentation of this file.
1 /*
2  * Copyright 2011 IBM Corporation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  *
9  */
10 #include <linux/types.h>
11 #include <linux/threads.h>
12 #include <linux/kernel.h>
13 #include <linux/irq.h>
14 #include <linux/debugfs.h>
15 #include <linux/smp.h>
16 #include <linux/interrupt.h>
17 #include <linux/seq_file.h>
18 #include <linux/init.h>
19 #include <linux/cpu.h>
20 #include <linux/of.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 
24 #include <asm/prom.h>
25 #include <asm/io.h>
26 #include <asm/smp.h>
27 #include <asm/machdep.h>
28 #include <asm/irq.h>
29 #include <asm/errno.h>
30 #include <asm/rtas.h>
31 #include <asm/xics.h>
32 #include <asm/firmware.h>
33 
34 /* Globals common to all ICP/ICS implementations */
35 const struct icp_ops *icp_ops;
36 
37 unsigned int xics_default_server = 0xff;
38 unsigned int xics_default_distrib_server = 0;
39 unsigned int xics_interrupt_server_size = 8;
40 
42 
44 
45 static LIST_HEAD(ics_list);
46 
48 {
49  int i, j;
50  struct device_node *np;
51  u32 ilen;
52  const u32 *ireg;
53  u32 hcpuid;
54 
55  /* Find the server numbers for the boot cpu. */
57  BUG_ON(!np);
58 
59  hcpuid = get_hard_smp_processor_id(boot_cpuid);
61 
62  pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
63 
64  ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
65  if (!ireg) {
66  of_node_put(np);
67  return;
68  }
69 
70  i = ilen / sizeof(int);
71 
72  /* Global interrupt distribution server is specified in the last
73  * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
74  * entry fom this property for current boot cpu id and use it as
75  * default distribution server
76  */
77  for (j = 0; j < i; j += 2) {
78  if (ireg[j] == hcpuid) {
79  xics_default_distrib_server = ireg[j+1];
80  break;
81  }
82  }
83  pr_devel("xics: xics_default_distrib_server = 0x%x\n",
85  of_node_put(np);
86 }
87 
88 /* GIQ stuff, currently only supported on RTAS setups, will have
89  * to be sorted properly for bare metal
90  */
/*
 * Join or leave the Global Interrupt Queue on behalf of @gserver.
 * @join: 1 to join the GIQ, 0 to leave it. No-op without RTAS.
 */
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
	int giq_index, rc;

	/* Nothing to do if the platform exposes no GIQ indicator */
	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
		return;

	/* GIQ indicator indices count down from the top of server space */
	giq_index = (1UL << xics_interrupt_server_size) - 1 - gserver;

	rc = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, giq_index, join);
	WARN(rc < 0, "set-indicator(%d, %d, %u) returned %d\n",
	     GLOBAL_INTERRUPT_QUEUE, giq_index, join, rc);
#endif
}
108 
109 void xics_setup_cpu(void)
110 {
111  icp_ops->set_priority(LOWEST_PRIORITY);
112 
114 }
115 
116 void xics_mask_unknown_vec(unsigned int vec)
117 {
118  struct ics *ics;
119 
120  pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
121 
122  list_for_each_entry(ics, &ics_list, link)
123  ics->mask_unknown(ics, vec);
124 }
125 
126 
127 #ifdef CONFIG_SMP
128 
129 static void xics_request_ipi(void)
130 {
131  unsigned int ipi;
132 
133  ipi = irq_create_mapping(xics_host, XICS_IPI);
134  BUG_ON(ipi == NO_IRQ);
135 
136  /*
137  * IPIs are marked IRQF_PERCPU. The handler was set in map.
138  */
139  BUG_ON(request_irq(ipi, icp_ops->ipi_action,
140  IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
141 }
142 
143 int __init xics_smp_probe(void)
144 {
145  /* Setup cause_ipi callback based on which ICP is used */
146  smp_ops->cause_ipi = icp_ops->cause_ipi;
147 
148  /* Register all the IPIs */
149  xics_request_ipi();
150 
151  return cpumask_weight(cpu_possible_mask);
152 }
153 
154 #endif /* CONFIG_SMP */
155 
157 {
158  struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
159 
160  /*
161  * we have to reset the cppr index to 0 because we're
162  * not going to return from the IPI
163  */
164  os_cppr->index = 0;
165  icp_ops->set_priority(0);
166  icp_ops->teardown_cpu();
167 }
168 
169 void xics_kexec_teardown_cpu(int secondary)
170 {
172 
173  icp_ops->flush_ipi();
174 
175  /*
176  * Some machines need to have at least one cpu in the GIQ,
177  * so leave the master cpu in the group.
178  */
179  if (secondary)
181 }
182 
183 
184 #ifdef CONFIG_HOTPLUG_CPU
185 
186 /* Interrupts are disabled. */
187 void xics_migrate_irqs_away(void)
188 {
189  int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
190  unsigned int irq, virq;
191  struct irq_desc *desc;
192 
193  /* If we used to be the default server, move to the new "boot_cpuid" */
194  if (hw_cpu == xics_default_server)
196 
197  /* Reject any interrupt that was queued to us... */
198  icp_ops->set_priority(0);
199 
200  /* Remove ourselves from the global interrupt queue */
202 
203  /* Allow IPIs again... */
204  icp_ops->set_priority(DEFAULT_PRIORITY);
205 
206  for_each_irq_desc(virq, desc) {
207  struct irq_chip *chip;
208  long server;
209  unsigned long flags;
210  struct ics *ics;
211 
212  /* We can't set affinity on ISA interrupts */
213  if (virq < NUM_ISA_INTERRUPTS)
214  continue;
215  /* We only need to migrate enabled IRQS */
216  if (!desc->action)
217  continue;
218  if (desc->irq_data.domain != xics_host)
219  continue;
220  irq = desc->irq_data.hwirq;
221  /* We need to get IPIs still. */
222  if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
223  continue;
224  chip = irq_desc_get_chip(desc);
225  if (!chip || !chip->irq_set_affinity)
226  continue;
227 
228  raw_spin_lock_irqsave(&desc->lock, flags);
229 
230  /* Locate interrupt server */
231  server = -1;
232  ics = irq_get_chip_data(virq);
233  if (ics)
234  server = ics->get_server(ics, irq);
235  if (server < 0) {
236  printk(KERN_ERR "%s: Can't find server for irq %d\n",
237  __func__, irq);
238  goto unlock;
239  }
240 
241  /* We only support delivery to all cpus or to one cpu.
242  * The irq has to be migrated only in the single cpu
243  * case.
244  */
245  if (server != hw_cpu)
246  goto unlock;
247 
248  /* This is expected during cpu offline. */
249  if (cpu_online(cpu))
250  pr_warning("IRQ %u affinity broken off cpu %u\n",
251  virq, cpu);
252 
253  /* Reset affinity to all cpus */
254  raw_spin_unlock_irqrestore(&desc->lock, flags);
255  irq_set_affinity(virq, cpu_all_mask);
256  continue;
257 unlock:
258  raw_spin_unlock_irqrestore(&desc->lock, flags);
259  }
260 }
261 #endif /* CONFIG_HOTPLUG_CPU */
262 
263 #ifdef CONFIG_SMP
264 /*
265  * For the moment we only implement delivery to all cpus or one cpu.
266  *
267  * If the requested affinity is cpu_all_mask, we set global affinity.
268  * If not we set it to the first cpu in the mask, even if multiple cpus
269  * are set. This is so things like irqbalance (which set core and package
270  * wide affinities) do the right thing.
271  *
272  * We need to fix this to implement support for the links
273  */
274 int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
275  unsigned int strict_check)
276 {
277 
278  if (!distribute_irqs)
279  return xics_default_server;
280 
281  if (!cpumask_subset(cpu_possible_mask, cpumask)) {
282  int server = cpumask_first_and(cpu_online_mask, cpumask);
283 
284  if (server < nr_cpu_ids)
285  return get_hard_smp_processor_id(server);
286 
287  if (strict_check)
288  return -1;
289  }
290 
291  /*
292  * Workaround issue with some versions of JS20 firmware that
293  * deliver interrupts to cpus which haven't been started. This
294  * happens when using the maxcpus= boot option.
295  */
296  if (cpumask_equal(cpu_online_mask, cpu_present_mask))
298 
299  return xics_default_server;
300 }
301 #endif /* CONFIG_SMP */
302 
303 static int xics_host_match(struct irq_domain *h, struct device_node *node)
304 {
305  struct ics *ics;
306 
307  list_for_each_entry(ics, &ics_list, link)
308  if (ics->host_match(ics, node))
309  return 1;
310 
311  return 0;
312 }
313 
314 /* Dummies */
315 static void xics_ipi_unmask(struct irq_data *d) { }
316 static void xics_ipi_mask(struct irq_data *d) { }
317 
318 static struct irq_chip xics_ipi_chip = {
319  .name = "XICS",
320  .irq_eoi = NULL, /* Patched at init time */
321  .irq_mask = xics_ipi_mask,
322  .irq_unmask = xics_ipi_unmask,
323 };
324 
325 static int xics_host_map(struct irq_domain *h, unsigned int virq,
327 {
328  struct ics *ics;
329 
330  pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
331 
332  /* They aren't all level sensitive but we just don't really know */
333  irq_set_status_flags(virq, IRQ_LEVEL);
334 
335  /* Don't call into ICS for IPIs */
336  if (hw == XICS_IPI) {
337  irq_set_chip_and_handler(virq, &xics_ipi_chip,
339  return 0;
340  }
341 
342  /* Let the ICS setup the chip data */
343  list_for_each_entry(ics, &ics_list, link)
344  if (ics->map(ics, virq) == 0)
345  return 0;
346 
347  return -EINVAL;
348 }
349 
350 static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
351  const u32 *intspec, unsigned int intsize,
352  irq_hw_number_t *out_hwirq, unsigned int *out_flags)
353 
354 {
355  /* Current xics implementation translates everything
356  * to level. It is not technically right for MSIs but this
357  * is irrelevant at this point. We might get smarter in the future
358  */
359  *out_hwirq = intspec[0];
360  *out_flags = IRQ_TYPE_LEVEL_LOW;
361 
362  return 0;
363 }
364 
365 static struct irq_domain_ops xics_host_ops = {
366  .match = xics_host_match,
367  .map = xics_host_map,
368  .xlate = xics_host_xlate,
369 };
370 
371 static void __init xics_init_host(void)
372 {
373  xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
374  BUG_ON(xics_host == NULL);
375  irq_set_default_host(xics_host);
376 }
377 
378 void __init xics_register_ics(struct ics *ics)
379 {
380  list_add(&ics->link, &ics_list);
381 }
382 
383 static void __init xics_get_server_size(void)
384 {
385  struct device_node *np;
386  const u32 *isize;
387 
388  /* We fetch the interrupt server size from the first ICS node
389  * we find if any
390  */
391  np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
392  if (!np)
393  return;
394  isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
395  if (!isize)
396  return;
398  of_node_put(np);
399 }
400 
401 void __init xics_init(void)
402 {
403  int rc = -1;
404 
405  /* Fist locate ICP */
406  if (firmware_has_feature(FW_FEATURE_LPAR))
407  rc = icp_hv_init();
408  if (rc < 0)
409  rc = icp_native_init();
410  if (rc < 0) {
411  pr_warning("XICS: Cannot find a Presentation Controller !\n");
412  return;
413  }
414 
415  /* Copy get_irq callback over to ppc_md */
416  ppc_md.get_irq = icp_ops->get_irq;
417 
418  /* Patch up IPI chip EOI */
419  xics_ipi_chip.irq_eoi = icp_ops->eoi;
420 
421  /* Now locate ICS */
422  rc = ics_rtas_init();
423  if (rc < 0)
424  rc = ics_opal_init();
425  if (rc < 0)
426  pr_warning("XICS: Cannot find a Source Controller !\n");
427 
428  /* Initialize common bits */
429  xics_get_server_size();
431  xics_init_host();
432  xics_setup_cpu();
433 }