Linux Kernel 3.7.1
iosapic.c
1 /*
2  * I/O SAPIC support.
3  *
4  * Copyright (C) 1999 Intel Corp.
5  * Copyright (C) 1999 Asit Mallick <[email protected]>
6  * Copyright (C) 2000-2002 J.I. Lee <[email protected]>
7  * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
8  * David Mosberger-Tang <[email protected]>
9  * Copyright (C) 1999 VA Linux Systems
10  * Copyright (C) 1999,2000 Walt Drummond <[email protected]>
11  *
12  * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O
13  * APIC code. In particular, we now have separate
14  * handlers for edge and level triggered
15  * interrupts.
16  * 00/10/27 Asit Mallick, Goutham Rao <[email protected]> IRQ vector
17  * allocation PCI to vector mapping, shared PCI
18  * interrupts.
19  * 00/10/27 D. Mosberger Document things a bit more to make them more
20  * understandable. Clean up much of the old
21  * IOSAPIC cruft.
22  * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts
23  * and fixes for ACPI S5(SoftOff) support.
24  * 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT
25  * 02/01/07 E. Focht <[email protected]> Redirectable interrupt
26  * vectors in iosapic_set_affinity(),
27  * initializations for /proc/irq/#/smp_affinity
28  * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing.
29  * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq
30  * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to
31  * IOSAPIC mapping error
32  * 02/07/29 T. Kochi Allocate interrupt vectors dynamically
33  * 02/08/04 T. Kochi Cleaned up terminology (irq, global system
34  * interrupt, vector, etc.)
35  * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's
36  * pci_irq code.
37  * 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC.
38  * Remove iosapic_address & gsi_base from
39  * external interfaces. Rationalize
40  * __init/__devinit attributes.
41  * 04/12/04 Ashok Raj <[email protected]> Intel Corporation 2004
42  * Updated to work with irq migration necessary
43  * for CPU Hotplug
44  */
45 /*
46  * Here is what the interrupt logic between a PCI device and the kernel looks
47  * like:
48  *
49  * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
50  INTD). The device is uniquely identified by its bus- and slot-number
51  * (the function number does not matter here because all functions share
52  * the same interrupt lines).
53  *
54  (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
55  * controller. Multiple interrupt lines may have to share the same
56  * IOSAPIC pin (if they're level triggered and use the same polarity).
57  * Each interrupt line has a unique Global System Interrupt (GSI) number
58  * which can be calculated as the sum of the controller's base GSI number
59  * and the IOSAPIC pin number to which the line connects.
60  *
61  (3) The IOSAPIC uses internal routing table entries (RTEs) to map the
62  * IOSAPIC pin into the IA-64 interrupt vector. This interrupt vector is then
63  * sent to the CPU.
64  *
65  * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is
66  * used as architecture-independent interrupt handling mechanism in Linux.
67  * As an IRQ is a number, we have to have an
68  * IA-64 interrupt vector number <-> IRQ number mapping. On smaller
69  * systems, we use one-to-one mapping between IA-64 vector and IRQ. A
70  * platform can implement platform_irq_to_vector(irq) and
71  * platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
72  * Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
73  *
74  * To sum up, there are three levels of mappings involved:
75  *
76  * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
77  *
78  * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
79  describe interrupts. Now we use "IRQ" only for Linux IRQs. ISA IRQ
80  * (isa_irq) is the only exception in this source code.
81  */
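/*
 * A concrete walk through the three-level mapping, using hypothetical
 * numbers purely for illustration:
 *
 *	GSI = gsi_base + IOSAPIC pin	e.g. 48 + 3 = 51
 *
 * A PCI device asserting INTA on a line wired to pin 3 of an IOSAPIC
 * whose gsi_base is 48 therefore has GSI 51.  Registering GSI 51 (see
 * iosapic_register_intr() below) picks or shares an IA-64 vector,
 * programs RTE 3 of that IOSAPIC with the vector, and hands back the
 * Linux IRQ number; on smaller systems the vector and the IRQ are the
 * same number.
 */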
82 
83 #include <linux/acpi.h>
84 #include <linux/init.h>
85 #include <linux/irq.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/pci.h>
89 #include <linux/slab.h>
90 #include <linux/smp.h>
91 #include <linux/string.h>
92 #include <linux/bootmem.h>
93 
94 #include <asm/delay.h>
95 #include <asm/hw_irq.h>
96 #include <asm/io.h>
97 #include <asm/iosapic.h>
98 #include <asm/machvec.h>
99 #include <asm/processor.h>
100 #include <asm/ptrace.h>
101 
102 #undef DEBUG_INTERRUPT_ROUTING
103 
104 #ifdef DEBUG_INTERRUPT_ROUTING
105 #define DBG(fmt...) printk(fmt)
106 #else
107 #define DBG(fmt...)
108 #endif
109 
110 static DEFINE_SPINLOCK(iosapic_lock);
111 
112 /*
113  * These tables map IA-64 vectors to the IOSAPIC pin that generates this
114  * vector.
115  */
116 
117 #define NO_REF_RTE 0
118 
119 static struct iosapic {
120  char __iomem *addr; /* base address of IOSAPIC */
121  unsigned int gsi_base; /* GSI base */
122  unsigned short num_rte; /* # of RTEs on this IOSAPIC */
123  int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
124 #ifdef CONFIG_NUMA
125  unsigned short node; /* numa node association via pxm */
126 #endif
127  spinlock_t lock; /* lock for indirect reg access */
128 } iosapic_lists[NR_IOSAPICS];
129 
130 struct iosapic_rte_info {
131  struct list_head rte_list; /* RTEs sharing the same vector */
132  char rte_index; /* IOSAPIC RTE index */
133  int refcnt; /* reference counter */
134  struct iosapic *iosapic;
135 } ____cacheline_aligned;
136 
137 static struct iosapic_intr_info {
138  struct list_head rtes; /* RTEs using this vector (empty =>
139  * not an IOSAPIC interrupt) */
140  int count; /* # of registered RTEs */
141  u32 low32; /* current value of low word of
142  * Redirection table entry */
143  unsigned int dest; /* destination CPU physical ID */
144  unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
145  unsigned char polarity: 1; /* interrupt polarity
146  * (see iosapic.h) */
147  unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
148 } iosapic_intr_info[NR_IRQS];
149 
150 static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
151 
152 static inline void
153 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
154 {
155  unsigned long flags;
156 
157  spin_lock_irqsave(&iosapic->lock, flags);
158  __iosapic_write(iosapic->addr, reg, val);
159  spin_unlock_irqrestore(&iosapic->lock, flags);
160 }
161 
162 /*
163  * Find an IOSAPIC associated with a GSI
164  */
165 static inline int
166 find_iosapic (unsigned int gsi)
167 {
168  int i;
169 
170  for (i = 0; i < NR_IOSAPICS; i++) {
171  if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
172  iosapic_lists[i].num_rte)
173  return i;
174  }
175 
176  return -1;
177 }
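/*
 * Note on the range check above: casting (gsi - gsi_base) to unsigned
 * folds "gsi >= gsi_base" and "gsi < gsi_base + num_rte" into a single
 * compare, since a gsi below gsi_base wraps to a huge unsigned value.
 * With a hypothetical gsi_base of 48 and num_rte of 24, gsi 51 yields
 * 3 (a match) while gsi 16 yields 0xffffffe0 (no match).
 */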
178 
179 static inline int __gsi_to_irq(unsigned int gsi)
180 {
181  int irq;
182  struct iosapic_intr_info *info;
183  struct iosapic_rte_info *rte;
184 
185  for (irq = 0; irq < NR_IRQS; irq++) {
186  info = &iosapic_intr_info[irq];
187  list_for_each_entry(rte, &info->rtes, rte_list)
188  if (rte->iosapic->gsi_base + rte->rte_index == gsi)
189  return irq;
190  }
191  return -1;
192 }
193 
194 int
195 gsi_to_irq (unsigned int gsi)
196 {
197  unsigned long flags;
198  int irq;
199 
200  spin_lock_irqsave(&iosapic_lock, flags);
201  irq = __gsi_to_irq(gsi);
202  spin_unlock_irqrestore(&iosapic_lock, flags);
203  return irq;
204 }
205 
206 static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
207 {
208  struct iosapic_rte_info *rte;
209 
210  list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
211  if (rte->iosapic->gsi_base + rte->rte_index == gsi)
212  return rte;
213  return NULL;
214 }
215 
216 static void
217 set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
218 {
219  unsigned long pol, trigger, dmode;
220  u32 low32, high32;
221  int rte_index;
222  char redir;
223  struct iosapic_rte_info *rte;
224  ia64_vector vector = irq_to_vector(irq);
225 
226  DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
227 
228  rte = find_rte(irq, gsi);
229  if (!rte)
230  return; /* not an IOSAPIC interrupt */
231 
232  rte_index = rte->rte_index;
233  pol = iosapic_intr_info[irq].polarity;
234  trigger = iosapic_intr_info[irq].trigger;
235  dmode = iosapic_intr_info[irq].dmode;
236 
237  redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
238 
239 #ifdef CONFIG_SMP
240  set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
241 #endif
242 
243  low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
244  (trigger << IOSAPIC_TRIGGER_SHIFT) |
245  (dmode << IOSAPIC_DELIVERY_SHIFT) |
246  ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
247  vector);
248 
249  /* dest contains both id and eid */
250  high32 = (dest << IOSAPIC_DEST_SHIFT);
251 
252  iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
253  iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
254  iosapic_intr_info[irq].low32 = low32;
255  iosapic_intr_info[irq].dest = dest;
256 }
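/*
 * For reference, the low word written above packs the RTE fields with
 * the shift macros from <asm/iosapic.h>: the 8-bit vector in the low
 * byte, delivery mode at IOSAPIC_DELIVERY_SHIFT, polarity at
 * IOSAPIC_POLARITY_SHIFT, trigger at IOSAPIC_TRIGGER_SHIFT and the
 * mask bit at IOSAPIC_MASK_SHIFT; the high word only carries the
 * destination id/eid at IOSAPIC_DEST_SHIFT.  A sketch of pulling two
 * fields back out of a saved low32 (mirroring the packing, not a
 * helper that exists in this file):
 *
 *	u8  vec    = low32 & 0xff;		(vector field)
 *	int masked = !!(low32 & IOSAPIC_MASK);	(RTE masked?)
 */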
257 
258 static void
259 nop (struct irq_data *data)
260 {
261  /* do nothing... */
262 }
263 
264 
265 #ifdef CONFIG_KEXEC
266 void
267 kexec_disable_iosapic(void)
268 {
269  struct iosapic_intr_info *info;
270  struct iosapic_rte_info *rte;
271  ia64_vector vec;
272  int irq;
273 
274  for (irq = 0; irq < NR_IRQS; irq++) {
275  info = &iosapic_intr_info[irq];
276  vec = irq_to_vector(irq);
277  list_for_each_entry(rte, &info->rtes,
278  rte_list) {
279  iosapic_write(rte->iosapic,
280  IOSAPIC_RTE_LOW(rte->rte_index),
281  IOSAPIC_MASK|vec);
282  iosapic_eoi(rte->iosapic->addr, vec);
283  }
284  }
285 }
286 #endif
287 
288 static void
289 mask_irq (struct irq_data *data)
290 {
291  unsigned int irq = data->irq;
292  u32 low32;
293  int rte_index;
294  struct iosapic_rte_info *rte;
295 
296  if (!iosapic_intr_info[irq].count)
297  return; /* not an IOSAPIC interrupt! */
298 
299  /* set only the mask bit */
300  low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
301  list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
302  rte_index = rte->rte_index;
303  iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
304  }
305 }
306 
307 static void
308 unmask_irq (struct irq_data *data)
309 {
310  unsigned int irq = data->irq;
311  u32 low32;
312  int rte_index;
313  struct iosapic_rte_info *rte;
314 
315  if (!iosapic_intr_info[irq].count)
316  return; /* not an IOSAPIC interrupt! */
317 
318  low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
319  list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
320  rte_index = rte->rte_index;
321  iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
322  }
323 }
324 
325 
326 static int
327 iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
328  bool force)
329 {
330 #ifdef CONFIG_SMP
331  unsigned int irq = data->irq;
332  u32 high32, low32;
333  int cpu, dest, rte_index;
334  int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
335  struct iosapic_rte_info *rte;
336  struct iosapic *iosapic;
337 
338  irq &= (~IA64_IRQ_REDIRECTED);
339 
340  cpu = cpumask_first_and(cpu_online_mask, mask);
341  if (cpu >= nr_cpu_ids)
342  return -1;
343 
344  if (irq_prepare_move(irq, cpu))
345  return -1;
346 
347  dest = cpu_physical_id(cpu);
348 
349  if (!iosapic_intr_info[irq].count)
350  return -1; /* not an IOSAPIC interrupt */
351 
352  set_irq_affinity_info(irq, dest, redir);
353 
354  /* dest contains both id and eid */
355  high32 = dest << IOSAPIC_DEST_SHIFT;
356 
357  low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
358  if (redir)
359  /* change delivery mode to lowest priority */
360  low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
361  else
362  /* change delivery mode to fixed */
363  low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
364  low32 &= IOSAPIC_VECTOR_MASK;
365  low32 |= irq_to_vector(irq);
366 
367  iosapic_intr_info[irq].low32 = low32;
368  iosapic_intr_info[irq].dest = dest;
369  list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
370  iosapic = rte->iosapic;
371  rte_index = rte->rte_index;
372  iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
373  iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
374  }
375 
376 #endif
377  return 0;
378 }
379 
380 /*
381  * Handlers for level-triggered interrupts.
382  */
383 
384 static unsigned int
385 iosapic_startup_level_irq (struct irq_data *data)
386 {
387  unmask_irq(data);
388  return 0;
389 }
390 
391 static void
392 iosapic_unmask_level_irq (struct irq_data *data)
393 {
394  unsigned int irq = data->irq;
395  ia64_vector vec = irq_to_vector(irq);
396  struct iosapic_rte_info *rte;
397  int do_unmask_irq = 0;
398 
399  irq_complete_move(irq);
400  if (unlikely(irqd_is_setaffinity_pending(data))) {
401  do_unmask_irq = 1;
402  mask_irq(data);
403  } else
404  unmask_irq(data);
405 
406  list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
407  iosapic_eoi(rte->iosapic->addr, vec);
408 
409  if (unlikely(do_unmask_irq)) {
410  irq_move_masked_irq(data);
411  unmask_irq(data);
412  }
413 }
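/*
 * Ordering note for the unmask path above: when an affinity change is
 * pending, the RTE stays masked across the EOI so the line cannot
 * refire mid-migration; irq_move_masked_irq() then rewrites the RTE,
 * and only afterwards is the interrupt unmasked at its new target.
 */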
414 
415 #define iosapic_shutdown_level_irq mask_irq
416 #define iosapic_enable_level_irq unmask_irq
417 #define iosapic_disable_level_irq mask_irq
418 #define iosapic_ack_level_irq nop
419 
420 static struct irq_chip irq_type_iosapic_level = {
421  .name = "IO-SAPIC-level",
422  .irq_startup = iosapic_startup_level_irq,
423  .irq_shutdown = iosapic_shutdown_level_irq,
424  .irq_enable = iosapic_enable_level_irq,
425  .irq_disable = iosapic_disable_level_irq,
426  .irq_ack = iosapic_ack_level_irq,
427  .irq_mask = mask_irq,
428  .irq_unmask = iosapic_unmask_level_irq,
429  .irq_set_affinity = iosapic_set_affinity
430 };
431 
432 /*
433  * Handlers for edge-triggered interrupts.
434  */
435 
436 static unsigned int
437 iosapic_startup_edge_irq (struct irq_data *data)
438 {
439  unmask_irq(data);
440  /*
441  * IOSAPIC simply drops interrupts pended while the
442  * corresponding pin was masked, so we can't know if an
443  * interrupt is pending already. Let's hope not...
444  */
445  return 0;
446 }
447 
448 static void
449 iosapic_ack_edge_irq (struct irq_data *data)
450 {
451  irq_complete_move(data->irq);
452  irq_move_irq(data);
453 }
454 
455 #define iosapic_enable_edge_irq unmask_irq
456 #define iosapic_disable_edge_irq nop
457 
458 static struct irq_chip irq_type_iosapic_edge = {
459  .name = "IO-SAPIC-edge",
460  .irq_startup = iosapic_startup_edge_irq,
461  .irq_shutdown = iosapic_disable_edge_irq,
462  .irq_enable = iosapic_enable_edge_irq,
463  .irq_disable = iosapic_disable_edge_irq,
464  .irq_ack = iosapic_ack_edge_irq,
465  .irq_mask = mask_irq,
466  .irq_unmask = unmask_irq,
467  .irq_set_affinity = iosapic_set_affinity
468 };
469 
470 static unsigned int
471 iosapic_version (char __iomem *addr)
472 {
473  /*
474  * IOSAPIC Version Register returns a 32 bit structure like:
475  * {
476  * unsigned int version : 8;
477  * unsigned int reserved1 : 8;
478  * unsigned int max_redir : 8;
479  * unsigned int reserved2 : 8;
480  * }
481  */
482  return __iosapic_read(addr, IOSAPIC_VERSION);
483 }
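/*
 * Example of decoding the returned word (hypothetical contents): a
 * value of 0x00170020 would mean version 0x20 and max_redir 0x17,
 * i.e. 0x17 + 1 = 24 RTEs; iosapic_init() below derives num_rte in
 * exactly that way with ((ver >> 16) & 0xff) + 1.
 */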
484 
485 static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
486 {
487  int i, irq = -ENOSPC, min_count = -1;
488  struct iosapic_intr_info *info;
489 
490  /*
491  * shared vectors for edge-triggered interrupts are not
492  * supported yet
493  */
494  if (trigger == IOSAPIC_EDGE)
495  return -EINVAL;
496 
497  for (i = 0; i < NR_IRQS; i++) {
498  info = &iosapic_intr_info[i];
499  if (info->trigger == trigger && info->polarity == pol &&
500  (info->dmode == IOSAPIC_FIXED ||
501  info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
502  can_request_irq(i, IRQF_SHARED)) {
503  if (min_count == -1 || info->count < min_count) {
504  irq = i;
505  min_count = info->count;
506  }
507  }
508  }
509  return irq;
510 }
511 
512 /*
513  * if the given vector is already owned by other,
514  * assign a new vector for the other and make the vector available
515  */
516 static void __init
517 iosapic_reassign_vector (int irq)
518 {
519  int new_irq;
520 
521  if (iosapic_intr_info[irq].count) {
522  new_irq = create_irq();
523  if (new_irq < 0)
524  panic("%s: out of interrupt vectors!\n", __func__);
525  printk(KERN_INFO "Reassigning vector %d to %d\n",
526  irq_to_vector(irq), irq_to_vector(new_irq));
527  memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
528  sizeof(struct iosapic_intr_info));
529  INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
530  list_move(iosapic_intr_info[irq].rtes.next,
531  &iosapic_intr_info[new_irq].rtes);
532  memset(&iosapic_intr_info[irq], 0,
533  sizeof(struct iosapic_intr_info));
534  iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
535  INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
536  }
537 }
538 
539 static inline int irq_is_shared (int irq)
540 {
541  return (iosapic_intr_info[irq].count > 1);
542 }
543 
544 struct irq_chip*
545 ia64_native_iosapic_get_irq_chip(unsigned long trigger)
546 {
547  if (trigger == IOSAPIC_EDGE)
548  return &irq_type_iosapic_edge;
549  else
550  return &irq_type_iosapic_level;
551 }
552 
553 static int
554 register_intr (unsigned int gsi, int irq, unsigned char delivery,
555  unsigned long polarity, unsigned long trigger)
556 {
557  struct irq_chip *chip, *irq_type;
558  int index;
559  struct iosapic_rte_info *rte;
560 
561  index = find_iosapic(gsi);
562  if (index < 0) {
563  printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
564  __func__, gsi);
565  return -ENODEV;
566  }
567 
568  rte = find_rte(irq, gsi);
569  if (!rte) {
570  rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
571  if (!rte) {
572  printk(KERN_WARNING "%s: cannot allocate memory\n",
573  __func__);
574  return -ENOMEM;
575  }
576 
577  rte->iosapic = &iosapic_lists[index];
578  rte->rte_index = gsi - rte->iosapic->gsi_base;
579  rte->refcnt++;
580  list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
581  iosapic_intr_info[irq].count++;
582  iosapic_lists[index].rtes_inuse++;
583  }
584  else if (rte->refcnt == NO_REF_RTE) {
585  struct iosapic_intr_info *info = &iosapic_intr_info[irq];
586  if (info->count > 0 &&
587  (info->trigger != trigger || info->polarity != polarity)){
588  printk(KERN_WARNING
589  "%s: cannot override the interrupt\n",
590  __func__);
591  return -EINVAL;
592  }
593  rte->refcnt++;
594  iosapic_intr_info[irq].count++;
595  iosapic_lists[index].rtes_inuse++;
596  }
597 
598  iosapic_intr_info[irq].polarity = polarity;
599  iosapic_intr_info[irq].dmode = delivery;
600  iosapic_intr_info[irq].trigger = trigger;
601 
602  irq_type = iosapic_get_irq_chip(trigger);
603 
604  chip = irq_get_chip(irq);
605  if (irq_type != NULL && chip != irq_type) {
606  if (chip != &no_irq_chip)
607  printk(KERN_WARNING
608  "%s: changing vector %d from %s to %s\n",
609  __func__, irq_to_vector(irq),
610  chip->name, irq_type->name);
611  chip = irq_type;
612  }
613  __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
614  handle_edge_irq : handle_level_irq,
615  NULL);
616  return 0;
617 }
618 
619 static unsigned int
620 get_target_cpu (unsigned int gsi, int irq)
621 {
622 #ifdef CONFIG_SMP
623  static int cpu = -1;
624  extern int cpe_vector;
625  cpumask_t domain = irq_to_domain(irq);
626 
627  /*
628  * In case of vector shared by multiple RTEs, all RTEs that
629  * share the vector need to use the same destination CPU.
630  */
631  if (iosapic_intr_info[irq].count)
632  return iosapic_intr_info[irq].dest;
633 
634  /*
635  * If the platform supports redirection via XTP, let it
636  * distribute interrupts.
637  */
638  if (smp_int_redirect & SMP_IRQ_REDIRECTION)
639  return cpu_physical_id(smp_processor_id());
640 
641  /*
642  * Some interrupts (ACPI SCI, for instance) are registered
643  * before the BSP is marked as online.
644  */
645  if (!cpu_online(smp_processor_id()))
646  return cpu_physical_id(smp_processor_id());
647 
648 #ifdef CONFIG_ACPI
649  if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
650  return get_cpei_target_cpu();
651 #endif
652 
653 #ifdef CONFIG_NUMA
654  {
655  int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
656  const struct cpumask *cpu_mask;
657 
658  iosapic_index = find_iosapic(gsi);
659  if (iosapic_index < 0 ||
660  iosapic_lists[iosapic_index].node == MAX_NUMNODES)
661  goto skip_numa_setup;
662 
663  cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
664  num_cpus = 0;
665  for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
666  if (cpu_online(numa_cpu))
667  num_cpus++;
668  }
669 
670  if (!num_cpus)
671  goto skip_numa_setup;
672 
673  /* Use irq assignment to distribute across cpus in node */
674  cpu_index = irq % num_cpus;
675 
676  for_each_cpu_and(numa_cpu, cpu_mask, &domain)
677  if (cpu_online(numa_cpu) && i++ >= cpu_index)
678  break;
679 
680  if (numa_cpu < nr_cpu_ids)
681  return cpu_physical_id(numa_cpu);
682  }
683 skip_numa_setup:
684 #endif
685  /*
686  * Otherwise, round-robin interrupt vectors across all the
687  * processors. (It'd be nice if we could be smarter in the
688  * case of NUMA.)
689  */
690  do {
691  if (++cpu >= nr_cpu_ids)
692  cpu = 0;
693  } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
694 
695  return cpu_physical_id(cpu);
696 #else /* CONFIG_SMP */
697  return cpu_physical_id(smp_processor_id());
698 #endif
699 }
700 
701 static inline unsigned char choose_dmode(void)
702 {
703 #ifdef CONFIG_SMP
704  if (smp_int_redirect & SMP_IRQ_REDIRECTION)
705  return IOSAPIC_LOWEST_PRIORITY;
706 #endif
707  return IOSAPIC_FIXED;
708 }
709 
710 /*
711  * ACPI can describe IOSAPIC interrupts via static tables and namespace
712  * methods. This provides an interface to register those interrupts and
713  * program the IOSAPIC RTE.
714  */
715 int
716 iosapic_register_intr (unsigned int gsi,
717  unsigned long polarity, unsigned long trigger)
718 {
719  int irq, mask = 1, err;
720  unsigned int dest;
721  unsigned long flags;
722  struct iosapic_rte_info *rte;
723  u32 low32;
724  unsigned char dmode;
725  struct irq_desc *desc;
726 
727  /*
728  * If this GSI has already been registered (i.e., it's a
729  * shared interrupt, or we lost a race to register it),
730  * don't touch the RTE.
731  */
732  spin_lock_irqsave(&iosapic_lock, flags);
733  irq = __gsi_to_irq(gsi);
734  if (irq > 0) {
735  rte = find_rte(irq, gsi);
736  if(iosapic_intr_info[irq].count == 0) {
737  assign_irq_vector(irq);
738  dynamic_irq_init(irq);
739  } else if (rte->refcnt != NO_REF_RTE) {
740  rte->refcnt++;
741  goto unlock_iosapic_lock;
742  }
743  } else
744  irq = create_irq();
745 
746  /* If we are running out of vectors, try to find a sharable vector */
747  if (irq < 0) {
748  irq = iosapic_find_sharable_irq(trigger, polarity);
749  if (irq < 0)
750  goto unlock_iosapic_lock;
751  }
752 
753  desc = irq_to_desc(irq);
754  raw_spin_lock(&desc->lock);
755  dest = get_target_cpu(gsi, irq);
756  dmode = choose_dmode();
757  err = register_intr(gsi, irq, dmode, polarity, trigger);
758  if (err < 0) {
759  raw_spin_unlock(&desc->lock);
760  irq = err;
761  goto unlock_iosapic_lock;
762  }
763 
764  /*
765  * If the vector is shared and already unmasked for other
766  * interrupt sources, don't mask it.
767  */
768  low32 = iosapic_intr_info[irq].low32;
769  if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
770  mask = 0;
771  set_rte(gsi, irq, dest, mask);
772 
773  printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
774  gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
775  (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
776  cpu_logical_id(dest), dest, irq_to_vector(irq));
777 
778  raw_spin_unlock(&desc->lock);
779  unlock_iosapic_lock:
780  spin_unlock_irqrestore(&iosapic_lock, flags);
781  return irq;
782 }
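/*
 * Typical use (sketch only): a caller such as the ACPI GSI setup code
 * registers a level-triggered, active-low PCI interrupt roughly like
 * this and then passes the returned IRQ to request_irq():
 *
 *	int irq = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *	if (irq < 0)
 *		return irq;
 */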
783 
784 void
785 iosapic_unregister_intr (unsigned int gsi)
786 {
787  unsigned long flags;
788  int irq, index;
789  u32 low32;
790  unsigned long trigger, polarity;
791  unsigned int dest;
792  struct iosapic_rte_info *rte;
793 
794  /*
795  * If the irq associated with the gsi is not found,
796  * iosapic_unregister_intr() is unbalanced. We need to check
797  * this again after getting locks.
798  */
799  irq = gsi_to_irq(gsi);
800  if (irq < 0) {
801  printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
802  gsi);
803  WARN_ON(1);
804  return;
805  }
806 
807  spin_lock_irqsave(&iosapic_lock, flags);
808  if ((rte = find_rte(irq, gsi)) == NULL) {
809  printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
810  gsi);
811  WARN_ON(1);
812  goto out;
813  }
814 
815  if (--rte->refcnt > 0)
816  goto out;
817 
818  rte->refcnt = NO_REF_RTE;
819 
820  /* Mask the interrupt */
821  low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
822  iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
823 
824  iosapic_intr_info[irq].count--;
825  index = find_iosapic(gsi);
826  iosapic_lists[index].rtes_inuse--;
827  WARN_ON(iosapic_lists[index].rtes_inuse < 0);
828 
829  trigger = iosapic_intr_info[irq].trigger;
830  polarity = iosapic_intr_info[irq].polarity;
831  dest = iosapic_intr_info[irq].dest;
832  printk(KERN_INFO
833  "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
834  gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
835  (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
836  cpu_logical_id(dest), dest, irq_to_vector(irq));
837 
838  if (iosapic_intr_info[irq].count == 0) {
839 #ifdef CONFIG_SMP
840  /* Clear affinity */
841  cpumask_setall(irq_get_irq_data(irq)->affinity);
842 #endif
843  /* Clear the interrupt information */
844  iosapic_intr_info[irq].dest = 0;
845  iosapic_intr_info[irq].dmode = 0;
846  iosapic_intr_info[irq].polarity = 0;
847  iosapic_intr_info[irq].trigger = 0;
848  iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
849 
850  /* Destroy and reserve IRQ */
851  destroy_and_reserve_irq(irq);
852  }
853  out:
854  spin_unlock_irqrestore(&iosapic_lock, flags);
855 }
856 
857 /*
858  * ACPI calls this when it finds an entry for a platform interrupt.
859  */
860 int __init
861 iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
862  int iosapic_vector, u16 eid, u16 id,
863  unsigned long polarity, unsigned long trigger)
864 {
865  static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
866  unsigned char delivery;
867  int irq, vector, mask = 0;
868  unsigned int dest = ((id << 8) | eid) & 0xffff;
869 
870  switch (int_type) {
871  case ACPI_INTERRUPT_PMI:
872  irq = vector = iosapic_vector;
873  bind_irq_vector(irq, vector, CPU_MASK_ALL);
874  /*
875  * since PMI vector is alloc'd by FW(ACPI) not by kernel,
876  * we need to make sure the vector is available
877  */
878  iosapic_reassign_vector(irq);
879  delivery = IOSAPIC_PMI;
880  break;
881  case ACPI_INTERRUPT_INIT:
882  irq = create_irq();
883  if (irq < 0)
884  panic("%s: out of interrupt vectors!\n", __func__);
885  vector = irq_to_vector(irq);
886  delivery = IOSAPIC_INIT;
887  break;
888  case ACPI_INTERRUPT_CPEI:
889  irq = vector = IA64_CPE_VECTOR;
890  BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
891  delivery = IOSAPIC_FIXED;
892  mask = 1;
893  break;
894  default:
895  printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
896  int_type);
897  return -1;
898  }
899 
900  register_intr(gsi, irq, delivery, polarity, trigger);
901 
902  printk(KERN_INFO
903  "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
904  " vector %d\n",
905  int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
906  int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
907  (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
908  cpu_logical_id(dest), dest, vector);
909 
910  set_rte(gsi, irq, dest, mask);
911  return vector;
912 }
913 
914 /*
915  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
916  */
917 void __devinit
918 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
919  unsigned long polarity,
920  unsigned long trigger)
921 {
922  int vector, irq;
923  unsigned int dest = cpu_physical_id(smp_processor_id());
924  unsigned char dmode;
925 
926  irq = vector = isa_irq_to_vector(isa_irq);
927  BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
928  dmode = choose_dmode();
929  register_intr(gsi, irq, dmode, polarity, trigger);
930 
931  DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
932  isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
933  polarity == IOSAPIC_POL_HIGH ? "high" : "low",
934  cpu_logical_id(dest), dest, vector);
935 
936  set_rte(gsi, irq, dest, 1);
937 }
938 
939 void __init
940 ia64_native_iosapic_pcat_compat_init(void)
941 {
942  if (pcat_compat) {
943  /*
944  * Disable the compatibility mode interrupts (8259 style),
945  * needs IN/OUT support enabled.
946  */
947  printk(KERN_INFO
948  "%s: Disabling PC-AT compatible 8259 interrupts\n",
949  __func__);
950  outb(0xff, 0xA1);
951  outb(0xff, 0x21);
952  }
953 }
954 
955 void __init
956 iosapic_system_init (int system_pcat_compat)
957 {
958  int irq;
959 
960  for (irq = 0; irq < NR_IRQS; ++irq) {
961  iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
962  /* mark as unused */
963  INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
964 
965  iosapic_intr_info[irq].count = 0;
966  }
967 
968  pcat_compat = system_pcat_compat;
969  if (pcat_compat)
970  iosapic_pcat_compat_init();
971 }
972 
973 static inline int
974 iosapic_alloc (void)
975 {
976  int index;
977 
978  for (index = 0; index < NR_IOSAPICS; index++)
979  if (!iosapic_lists[index].addr)
980  return index;
981 
982  printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
983  return -1;
984 }
985 
986 static inline void
987 iosapic_free (int index)
988 {
989  memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
990 }
991 
992 static inline int
993 iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
994 {
995  int index;
996  unsigned int gsi_end, base, end;
997 
998  /* check gsi range */
999  gsi_end = gsi_base + ((ver >> 16) & 0xff);
1000  for (index = 0; index < NR_IOSAPICS; index++) {
1001  if (!iosapic_lists[index].addr)
1002  continue;
1003 
1004  base = iosapic_lists[index].gsi_base;
1005  end = base + iosapic_lists[index].num_rte - 1;
1006 
1007  if (gsi_end < base || end < gsi_base)
1008  continue; /* OK */
1009 
1010  return -EBUSY;
1011  }
1012  return 0;
1013 }
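/*
 * Overlap check example (hypothetical controllers): an already
 * registered IOSAPIC with gsi_base 16 and num_rte 24 owns GSIs 16..39,
 * so a new one starting at gsi_base 40 is accepted, while one starting
 * at gsi_base 32 is rejected with -EBUSY because its GSI range would
 * intersect 16..39.
 */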
1014 
1015 int __devinit
1016 iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
1017 {
1018  int num_rte, err, index;
1019  unsigned int isa_irq, ver;
1020  char __iomem *addr;
1021  unsigned long flags;
1022 
1023  spin_lock_irqsave(&iosapic_lock, flags);
1024  index = find_iosapic(gsi_base);
1025  if (index >= 0) {
1026  spin_unlock_irqrestore(&iosapic_lock, flags);
1027  return -EBUSY;
1028  }
1029 
1030  addr = ioremap(phys_addr, 0);
1031  if (addr == NULL) {
1032  spin_unlock_irqrestore(&iosapic_lock, flags);
1033  return -ENOMEM;
1034  }
1035  ver = iosapic_version(addr);
1036  if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
1037  iounmap(addr);
1038  spin_unlock_irqrestore(&iosapic_lock, flags);
1039  return err;
1040  }
1041 
1042  /*
1043  * The MAX_REDIR register holds the highest input pin number
1044  * (starting from 0). We add 1 so that we can use it for
1045  * number of pins (= RTEs)
1046  */
1047  num_rte = ((ver >> 16) & 0xff) + 1;
1048 
1049  index = iosapic_alloc();
1050  iosapic_lists[index].addr = addr;
1051  iosapic_lists[index].gsi_base = gsi_base;
1052  iosapic_lists[index].num_rte = num_rte;
1053 #ifdef CONFIG_NUMA
1054  iosapic_lists[index].node = MAX_NUMNODES;
1055 #endif
1056  spin_lock_init(&iosapic_lists[index].lock);
1057  spin_unlock_irqrestore(&iosapic_lock, flags);
1058 
1059  if ((gsi_base == 0) && pcat_compat) {
1060  /*
1061  * Map the legacy ISA devices into the IOSAPIC data. Some of
1062  * these may get reprogrammed later on with data from the ACPI
1063  * Interrupt Source Override table.
1064  */
1065  for (isa_irq = 0; isa_irq < 16; ++isa_irq)
1066  iosapic_override_isa_irq(isa_irq, isa_irq,
1067  IOSAPIC_POL_HIGH,
1068  IOSAPIC_EDGE);
1069  }
1070  return 0;
1071 }
1072 
1073 #ifdef CONFIG_HOTPLUG
1074 int
1075 iosapic_remove (unsigned int gsi_base)
1076 {
1077  int index, err = 0;
1078  unsigned long flags;
1079 
1080  spin_lock_irqsave(&iosapic_lock, flags);
1081  index = find_iosapic(gsi_base);
1082  if (index < 0) {
1083  printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
1084  __func__, gsi_base);
1085  goto out;
1086  }
1087 
1088  if (iosapic_lists[index].rtes_inuse) {
1089  err = -EBUSY;
1090  printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
1091  __func__, gsi_base);
1092  goto out;
1093  }
1094 
1095  iounmap(iosapic_lists[index].addr);
1096  iosapic_free(index);
1097  out:
1098  spin_unlock_irqrestore(&iosapic_lock, flags);
1099  return err;
1100 }
1101 #endif /* CONFIG_HOTPLUG */
1102 
1103 #ifdef CONFIG_NUMA
1104 void __devinit
1105 map_iosapic_to_node(unsigned int gsi_base, int node)
1106 {
1107  int index;
1108 
1109  index = find_iosapic(gsi_base);
1110  if (index < 0) {
1111  printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
1112  __func__, gsi_base);
1113  return;
1114  }
1115  iosapic_lists[index].node = node;
1116  return;
1117 }
1118 #endif