#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/xen/page.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>

#include <asm/hw_irq.h>
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
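/*
 * Event-channel bookkeeping: evtchn_to_irq maps each Xen event channel to
 * the Linux IRQ bound to it (-1 when unbound).  pirq_eoi_map is a bitmap of
 * physical IRQs that still need an explicit EOI, and pirq_needs_eoi points
 * at whichever test (shared bitmap or per-IRQ flag) is in use.
 */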
static int *evtchn_to_irq;

static unsigned long *pirq_eoi_map;
static bool (*pirq_needs_eoi)(unsigned irq);

#define VALID_EVTCHN(chn)	((chn) != 0)
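/*
 * Forward declarations of the three irq_chip implementations defined near
 * the end of this file: dynamic event channels, per-CPU (VIRQ/IPI) channels
 * and hardware-backed PIRQs.
 */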
static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Get the per-IRQ info structure: it is kept in the IRQ's handler data. */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}
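/*
 * Constructors for struct irq_info: xen_irq_info_common_init() fills in the
 * fields shared by all bindings, and the evtchn/ipi/virq/pirq variants below
 * add the type-specific information.
 */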
static void xen_irq_info_common_init(struct irq_info *info,

static void xen_irq_info_evtchn_init(unsigned irq,

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);

static void xen_irq_info_ipi_init(unsigned cpu,

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

static void xen_irq_info_virq_init(unsigned cpu,

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

static void xen_irq_info_pirq_init(unsigned irq,

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
static unsigned int evtchn_from_irq(unsigned irq)
	return info_for_irq(irq)->evtchn;

	return evtchn_to_irq[evtchn];

static enum ipi_vector ipi_from_irq(unsigned irq)

static unsigned virq_from_irq(unsigned irq)

static unsigned pirq_from_irq(unsigned irq)
	return info->u.pirq.pirq;

	return info_for_irq(irq)->type;

static unsigned cpu_from_irq(unsigned irq)
	return info_for_irq(irq)->cpu;

static unsigned int cpu_from_evtchn(unsigned int evtchn)
	int irq = evtchn_to_irq[evtchn];
	ret = cpu_from_irq(irq);
static bool pirq_check_eoi_map(unsigned irq)
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);

static bool pirq_needs_eoi_flag(unsigned irq)

static inline unsigned long active_evtchns(unsigned int cpu,

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
	int irq = evtchn_to_irq[chn];
	info_for_irq(irq)->cpu = cpu;

static void init_evtchn_cpu_bindings(void)

static inline void set_evtchn(int port)

static inline int test_evtchn(int port)

	int evtchn = evtchn_from_irq(irq);
	notify_remote_via_evtchn(evtchn);

static void mask_evtchn(int port)

static void unmask_evtchn(int port)
	if (unlikely((cpu != cpu_from_evtchn(port))))
	if (evtchn_pending &&

		panic("Unable to allocate metadata for IRQ%d\n", irq);
#ifdef CONFIG_X86_IO_APIC
	irq = irq_alloc_desc_from(first, -1);

		return xen_allocate_irq_dynamic();
	irq = irq_alloc_desc_at(gsi, -1);

static void xen_free_irq(unsigned irq)
	struct irq_info *info = irq_get_handler_data(irq);

static void pirq_query_unmask(int irq)
	struct irq_info *info = info_for_irq(irq);
	irq_status.irq = pirq_from_irq(irq);
	irq_status.flags = 0;

static bool probing_irq(int irq)

	int evtchn = evtchn_from_irq(data->irq);
	clear_evtchn(evtchn);
	if (pirq_needs_eoi(data->irq)) {

static void mask_ack_pirq(struct irq_data *data)
	disable_dynirq(data);
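/*
 * __startup_pirq() asks Xen to bind the physical IRQ to a fresh event
 * channel (EVTCHNOP_bind_pirq), queries whether it needs an EOI, binds the
 * channel to CPU 0 and unmasks it.
 */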
static unsigned int __startup_pirq(unsigned int irq)
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	bind_pirq.pirq = pirq_from_irq(irq);
		if (!probing_irq(irq))
	evtchn = bind_pirq.port;
	pirq_query_unmask(irq);
	bind_evtchn_to_cpu(evtchn, 0);
	unmask_evtchn(evtchn);

static unsigned int startup_pirq(struct irq_data *data)
	return __startup_pirq(data->irq);

static void shutdown_pirq(struct irq_data *data)
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;

static void enable_pirq(struct irq_data *data)

static void disable_pirq(struct irq_data *data)
	disable_dynirq(data);

		if (info->u.pirq.gsi == gsi)

		      unsigned pirq, int shareable, char *name)
	irq = xen_allocate_irq_gsi(gsi);
	pirq_query_unmask(irq);
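/*
 * MSI support (CONFIG_PCI_MSI): a free PIRQ is requested from the
 * hypervisor with PHYSDEVOP_get_free_pirq and then bound to a dynamically
 * allocated IRQ for the device's MSI vector.
 */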
#ifdef CONFIG_PCI_MSI
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
	return rc ? -1 : op_get_free_pirq.pirq;

	irq = xen_allocate_irq_dynamic();
	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);

	struct irq_info *info = info_for_irq(irq);

		if (info->u.pirq.pirq == pirq)
	return pirq_from_irq(irq);

	irq = evtchn_to_irq[evtchn];
	irq = xen_allocate_irq_dynamic();
	xen_irq_info_evtchn_init(irq, evtchn);
		struct irq_info *info = info_for_irq(irq);
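/*
 * The bind_*_to_irq() helpers below map an IPI, interdomain event channel
 * or VIRQ onto a Linux IRQ, reusing an existing binding where one is
 * already recorded for this CPU and allocating a new dynamic IRQ otherwise.
 */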
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	irq = xen_allocate_irq_dynamic();
	evtchn = bind_ipi.port;
	xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
	bind_evtchn_to_cpu(evtchn, cpu);
		struct irq_info *info = info_for_irq(irq);

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
	bind_interdomain.remote_dom = remote_domain;

static int find_virq(unsigned int virq, unsigned int cpu)

	int evtchn, irq, ret;
	irq = xen_allocate_irq_dynamic();
		evtchn = bind_virq.port;
		ret = find_virq(virq, cpu);
	xen_irq_info_virq_init(cpu, irq, evtchn, virq);
	bind_evtchn_to_cpu(evtchn, cpu);
		struct irq_info *info = info_for_irq(irq);

static void unbind_from_irq(unsigned int irq)
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);
	switch (type_from_irq(irq)) {
		per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
			[virq_from_irq(irq)] = -1;
		per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
			[ipi_from_irq(irq)] = -1;
	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
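/*
 * The bind_*_to_irqhandler() wrappers combine the binding above with
 * request_irq(); if request_irq() fails, the freshly bound IRQ is unbound
 * again so no stale mapping is left behind.
 */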
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
		unbind_from_irq(irq);

			  unsigned long irqflags,
			  const char *devname,
	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
		unbind_from_irq(irq);

			    unsigned long irqflags, const char *devname, void *dev_id)
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
		unbind_from_irq(irq);

			  unsigned long irqflags,
			  const char *devname,
	irq = bind_ipi_to_irq(ipi, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
		unbind_from_irq(irq);

	unbind_from_irq(irq);

	int irq = evtchn_to_irq[evtchn];
	info = irq_get_handler_data(irq);

	irq = evtchn_to_irq[evtchn];
	info = irq_get_handler_data(irq);

	int irq = evtchn_to_irq[evtchn];
		unbind_from_irq(irq);
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	unsigned long flags;
	struct vcpu_info *v;

	printk("\nvcpu %d\n ", cpu);
	printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
	       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	       i % 8 == 0 ? "\n " : " ");
	printk("\nglobally unmasked:\n ");
	       i % 8 == 0 ? "\n " : " ");
	printk("\nlocal cpu%d mask:\n ", cpu);
	printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
	       i % 8 == 0 ? "\n " : " ");
	printk("\nlocally unmasked:\n ");
	       pending, i % 8 == 0 ? "\n " : " ");
	printk("\npending list:\n");
		printk(" %d: event %d -> irq %d%s%s%s\n",
		       cpu_from_evtchn(i), i,
		       ? "" : " globally-masked",
		       ? "" : " locally-masked");
	spin_unlock_irqrestore(&debug_lock, flags);

#define MASK_LSBS(w, i) (w & ((~0UL) << i))
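/*
 * __xen_evtchn_do_upcall() walks the two-level pending bitmap: a selector
 * word of "pending words", each selecting a word of pending event channels.
 * The scan resumes just after the last word/bit serviced on this CPU so a
 * constantly firing channel cannot starve the others.
 */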
static void __xen_evtchn_do_upcall(void)
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	unsigned long pending_words;

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		unsigned long pending_bits;
		unsigned long words;

		words = MASK_LSBS(pending_words, word_idx);
		word_idx = __ffs(words);
		pending_bits = active_evtchns(cpu, s, word_idx);
		if (word_idx == start_word_idx) {
				bit_idx = start_bit_idx;
				bit_idx &= (1UL << start_bit_idx) - 1;

			bits = MASK_LSBS(pending_bits, bit_idx);
			bit_idx = __ffs(bits);
			irq = evtchn_to_irq[port];
				generic_handle_irq_desc(irq, desc);
					 bit_idx ? word_idx :
		} while (bit_idx != 0);

		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

	struct pt_regs *old_regs = set_irq_regs(regs);
	__xen_evtchn_do_upcall();
	set_irq_regs(old_regs);

	__xen_evtchn_do_upcall();

	struct irq_info *info = info_for_irq(irq);
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	xen_irq_info_evtchn_init(irq, evtchn);
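/*
 * IRQ affinity: rebind_irq_to_cpu() asks Xen to deliver the event channel
 * to another VCPU (EVTCHNOP_bind_vcpu) and, on success, updates the local
 * bookkeeping via bind_evtchn_to_cpu().
 */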
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
	int evtchn = evtchn_from_irq(irq);
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;
		bind_evtchn_to_cpu(evtchn, tcpu);

	unsigned tcpu = cpumask_first(dest);
	return rebind_irq_to_cpu(data->irq, tcpu);

	int masked, evtchn = evtchn_from_irq(irq);
		unmask_evtchn(evtchn);

static void enable_dynirq(struct irq_data *data)
	int evtchn = evtchn_from_irq(data->irq);
		unmask_evtchn(evtchn);

static void disable_dynirq(struct irq_data *data)
	int evtchn = evtchn_from_irq(data->irq);
		mask_evtchn(evtchn);

static void ack_dynirq(struct irq_data *data)
	int evtchn = evtchn_from_irq(data->irq);
		clear_evtchn(evtchn);

static void mask_ack_dynirq(struct irq_data *data)
	disable_dynirq(data);

static int retrigger_dynirq(struct irq_data *data)
	int evtchn = evtchn_from_irq(data->irq);
		unmask_evtchn(evtchn);
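/*
 * Suspend/resume: event-channel bindings do not survive a suspend, so the
 * restore_* helpers below re-establish every PIRQ, VIRQ and IPI binding
 * from the recorded irq_info and rebind the new event channels to their
 * CPUs.
 */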
static void restore_pirqs(void)
	int pirq, rc, irq, gsi;
		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		map_irq.index = gsi;
		map_irq.pirq = pirq;
				gsi, irq, pirq, rc);
		__startup_pirq(irq);

static void restore_cpu_virqs(unsigned int cpu)
	int virq, irq, evtchn;
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
		BUG_ON(virq_from_irq(irq) != virq);
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		evtchn = bind_virq.port;
		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

static void restore_cpu_ipis(unsigned int cpu)
	int ipi, irq, evtchn;
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
		BUG_ON(ipi_from_irq(irq) != ipi);
		bind_ipi.vcpu = cpu;
		evtchn = bind_ipi.port;
		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

	int evtchn = evtchn_from_irq(irq);
		clear_evtchn(evtchn);

	int evtchn = evtchn_from_irq(irq);

	int evtchn = evtchn_from_irq(irq);
	ret = test_evtchn(evtchn);

	struct irq_info *info = info_for_irq(irq);

	unsigned int cpu, evtchn;
	init_evtchn_cpu_bindings();
		mask_evtchn(evtchn);
		evtchn_to_irq[evtchn] = -1;
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
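/*
 * The irq_chip definitions below hook the generic IRQ layer into the
 * event-channel primitives: mask/unmask map onto mask_evtchn()/
 * unmask_evtchn(), ack clears the pending bit, and the PIRQ chip adds the
 * Xen-level startup/shutdown and EOI handling.
 */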
	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,

static struct irq_chip xen_pirq_chip __read_mostly = {

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
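/*
 * CONFIG_XEN_PVHVM: when running as an HVM guest with PV drivers, event
 * channel upcalls are delivered through a callback vector rather than the
 * paravirtualized upcall entry point.
 */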
#ifdef CONFIG_XEN_PVHVM

		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	pirq_needs_eoi = pirq_needs_eoi_flag;

		pci_xen_initial_domain();

			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
			pirq_needs_eoi = pirq_check_eoi_map;