#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>

#include <asm/xics.h>
#define to_wsp_ics(ics)		container_of(ics, struct wsp_ics, ics)

#define INT_SRC_LAYER_BUID_REG(base)	((base) + 0x00)
#define IODA_TBL_ADDR_REG(base)		((base) + 0x18)
#define IODA_TBL_DATA_REG(base)		((base) + 0x20)
#define XIVE_UPDATE_REG(base)		((base) + 0x28)
#define ICS_INT_CAPS_REG(base)		((base) + 0x30)
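
/*
 * Per-ICS MMIO registers: the source layer BUID register, the IODA
 * table address/data indirection pair, the XIVE update register and
 * the interrupt capabilities register (presumably the word decoded
 * into MSI/LSI counts and bases in wsp_ics_setup() below).
 */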
#define TBL_AUTO_INCREMENT	((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST		(1UL << 48)
#define TBL_SELECT_XIVT		(1UL << 49)

#define IODA_IRQ(irq)		((irq) & (0x7FFULL))
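
/*
 * Table accesses are assumed to follow the usual address/data pattern:
 * write IODA_TBL_ADDR_REG with a table select (TBL_SELECT_XIST or
 * TBL_SELECT_XIVT), the entry index from IODA_IRQ(irq) and, when
 * walking the table, TBL_AUTO_INCREMENT; the entry itself is then read
 * or written through IODA_TBL_DATA_REG.
 */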
#define XIST_REQUIRED		0x8
#define XIST_REJECTED		0x4
#define XIST_PRESENTED		0x2
#define XIST_PENDING		0x1
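
/*
 * Presumably the per-source state bits held in an XIST entry: the
 * interrupt may be required, rejected, presented or pending.
 */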
#define XIVE_SERVER_SHIFT	42
#define XIVE_SERVER_MASK	0xFFFFULL
#define XIVE_PRIORITY_MASK	0xFFULL
#define XIVE_PRIORITY_SHIFT	32
#define XIVE_WRITE_ENABLE	(1ULL << 63)
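
/*
 * A XIVE entry packs the destination server and the priority into a
 * single 64-bit word.  A minimal sketch of how xive_set_server()
 * below is assumed to apply these masks:
 *
 *	xive &= ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
 *	xive |= ((u64)server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
 *
 * XIVE_WRITE_ENABLE is presumably set when an updated entry is pushed
 * to the hardware via XIVE_UPDATE_REG.
 */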
#define WSP_ICS_CHIP_SHIFT	17
static struct wsp_ics *ics_list;
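
/*
 * ics_list is the array of per-node ICS instances, presumably
 * allocated in wsp_ics_init() (one entry per "ibm,wsp-xics" node),
 * with num_ics tracking how many were set up.
 */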
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
	spin_unlock_irqrestore(&ics->lock, flags);
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)

static u64 xive_set_server(u64 xive, unsigned int server)
		cpumask_set_cpu(cpu, ret);
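
/*
 * cache_hwirq_map() picks a target CPU for a hwirq: unless
 * distribute_irqs is disabled it narrows the requested affinity to the
 * online CPUs of the interrupt's node (cpus_on_chip()), selects one of
 * them round-robin style (hwirq % cpumask_weight()), and caches the
 * hard SMP processor id in ics->hwirq_cpu_map[].  If no suitable CPU
 * is found it falls back to the first online CPU.
 */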
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
	if (!ics->hwirq_cpu_map)
	if (!distribute_irqs) {
		cpumask_and(avail, cpu_online_mask, affinity);
		cpumask_copy(avail, cpu_online_mask);
	cpus_on_chip(nodeid, avail, newmask);
	if (unlikely(cpumask_empty(newmask))) {
		if (unlikely(cpumask_empty(avail))) {
		cpumask_copy(newmask, avail);
	target = hwirq % cpumask_weight(newmask);
		if (cpu_rover++ >= target) {
			ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
	free_cpumask_var(newmask);
	free_cpumask_var(avail);
	ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
	pr_warning("Error, falling back: routing hwirq 0x%x to CPU %i\n",
		   hwirq, ics->hwirq_cpu_map[index]);
static void alloc_irq_map(struct wsp_ics *ics)
	if (!ics->hwirq_cpu_map) {
		pr_warning("Allocation of hwirq_cpu_map failed, "
			   "IRQ balancing disabled\n");
	for (i = 0; i < ics->count; i++)
static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
	if (!ics->hwirq_cpu_map)
	return ics->hwirq_cpu_map[index];
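
/*
 * The stub variants below are presumably the !CONFIG_SMP versions of
 * cache_hwirq_map(), get_irq_server() and alloc_irq_map(), selected by
 * the usual #ifdef CONFIG_SMP / #else / #endif guards.
 */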
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)

static void alloc_irq_map(struct wsp_ics *ics) { }
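
/*
 * irq_chip callbacks.  Unmasking re-targets the source: look up the
 * server cached for this hwirq, fold it into the XIVE entry read from
 * the hardware and write the entry back.
 */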
static void wsp_chip_unmask_irq(struct irq_data *d)
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	server = get_irq_server(ics, hw_irq);
	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, server);
	wsp_ics_set_xive(ics, hw_irq, xive);
static unsigned int wsp_chip_startup(struct irq_data *d)
	wsp_chip_unmask_irq(d);
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
	xive = wsp_ics_get_xive(ics, hw_irq);
	wsp_ics_set_xive(ics, hw_irq, xive);
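
/*
 * Masking goes through wsp_mask_real_irq(); as on other XICS-style
 * backends this presumably means writing the least-favoured priority
 * (0xff) into the XIVE entry before it is written back.
 */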
static void wsp_chip_mask_irq(struct irq_data *d)
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	wsp_mask_real_irq(hw_irq, ics);
static int wsp_chip_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	xive = wsp_ics_get_xive(ics, hw_irq);
	ret = cache_hwirq_map(ics, hw_irq, cpumask);
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
	} else if (ret == -ENOMEM) {
	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
	wsp_ics_set_xive(ics, hw_irq, xive);
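
/*
 * Changing affinity refreshes the cached hwirq-to-CPU mapping via
 * cache_hwirq_map() and then rewrites the XIVE entry with the newly
 * selected server, mirroring the unmask path above.
 */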
static struct irq_chip wsp_irq_chip = {
	.irq_startup		= wsp_chip_startup,
	.irq_mask		= wsp_chip_mask_irq,
	.irq_unmask		= wsp_chip_unmask_irq,
	.irq_set_affinity	= wsp_chip_set_affinity,
};
static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
	    hwirq < wsp_ics->hwirq_start + wsp_ics->count)
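
/*
 * wsp_ics_match_hwirq() reports whether a hardware interrupt number
 * falls inside this ICS's range, i.e. within
 * [hwirq_start, hwirq_start + count).
 */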
static int wsp_ics_map(struct ics *ics, unsigned int virq)
	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
	spin_unlock_irqrestore(&wsp_ics->lock, flags);
static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
	pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
	wsp_mask_real_irq(hw_irq, wsp_ics);
static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
	return get_irq_server(wsp_ics, hw_irq);
static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
		pr_err("wsp_ics: Failed to find interrupt parent!\n");
	for (i = 0; i < num_ics; i++) {
		if (ics_list[i].dn == iparent)
	pr_err("wsp_ics: Unable to find parent bitmap!\n");
	ics = wsp_ics_find_dn_ics(dn);
	order = get_count_order(num);
	spin_lock_irq(&ics->lock);
	spin_unlock_irq(&ics->lock);

	ics = wsp_ics_find_dn_ics(dn);
	spin_lock_irq(&ics->lock);
	spin_unlock_irq(&ics->lock);
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
				       struct device_node *dn)
		pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
		pr_err("wsp_ics: No available-ranges defined for %s\n",
		       dn->full_name);
	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("wsp_ics: Invalid available-ranges for %s\n",
		       dn->full_name);
	for (i = 0; i < len / sizeof(u32); i += 2) {
		start = of_read_number(p + i, 1);
		count = of_read_number(p + i + 1, 1);
		pr_devel("%s: start: %d count: %d\n", __func__, start, count);
			pr_err("wsp_ics: Invalid range! -> %d to %d\n",
			       start, start + count);
		for (j = 0; j < count; j++)
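
	/*
	 * The "available-ranges" property is read as pairs of cells,
	 * (start, count); each pair is validated against the ICS's
	 * interrupt range and then presumably recorded, one interrupt
	 * at a time, in the allocation bitmap set up above.
	 */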
	u32 lsi_buid, msi_buid, msi_base, msi_count;
	if (!p || len < (2 * sizeof(u32))) {
		pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
		       dn->full_name);
	if (len > (2 * sizeof(u32))) {
		pr_err("wsp_ics: Multiple ics ranges not supported.\n");
	ics->count = of_read_number(p + 1, 1);
	msi_count = (caps >> 44) & 0x7ff;
	lsi_buid = (buid >> 48) & 0x1ff;
	msi_buid = (buid >> 37) & 0x7;
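
	/*
	 * The MSI count and the LSI/MSI BUIDs are presumably extracted
	 * from the capabilities word (ICS_INT_CAPS_REG) and the source
	 * layer BUID register read above; the LSI/MSI bases are then
	 * derived before the ranges are reported below.
	 */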
	pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
		ics->hwirq_start, ics->hwirq_start + ics->count - 1);
	pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
		lsi_count, lsi_base, lsi_base + lsi_count - 1);
	pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
		msi_count, msi_base,
		msi_base + msi_count - 1);
		pr_warning("wsp_ics: WARNING! LSIs out of interrupt-ranges!\n");
	if (msi_base < ics->hwirq_start ||
	    (msi_base + msi_count) > (ics->hwirq_start + ics->count))
		pr_warning("wsp_ics: WARNING! MSIs out of interrupt-ranges!\n");
	rc = wsp_ics_bitmap_setup(ics, dn);
	ics->dn = of_node_get(dn);
	for (i = 0; i < ics->count; i++)
	ics->ics.map = wsp_ics_map;
	ics->ics.mask_unknown = wsp_ics_mask_unknown;
	ics->ics.get_server = wsp_ics_get_server;
	ics->ics.host_match = wsp_ics_host_match;
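
	/*
	 * These populate the generic struct ics operations (map,
	 * mask_unknown, get_server, host_match) that the common XICS
	 * code invokes; the instance is presumably registered with the
	 * core via xics_register_ics().
	 */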
static void __init wsp_ics_set_default_server(void)

static int __init wsp_ics_init(void)
	wsp_ics_set_default_server();
	for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
		pr_err("wsp_ics: No ICSs found!\n");
		pr_err("wsp_ics: No memory for structs.\n");
	for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
		rc = wsp_ics_setup(ics, dn);
	if (found != num_ics) {
		pr_err("wsp_ics: Failed setting up %d ICSs\n",
		       found - num_ics);
#ifdef CONFIG_PCI_MSI
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
	wsp_chip_unmask_irq(d);

static unsigned int wsp_ics_msi_startup(struct irq_data *d)
	wsp_ics_msi_unmask_irq(d);
static void wsp_ics_msi_mask_irq(struct irq_data *d)
	wsp_chip_mask_irq(d);
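
/*
 * The MSI variants wrap the base chip callbacks above; presumably they
 * additionally mask/unmask the MSI at the PCI level, and the chip
 * carries its own .irq_eoi handler (wsp_ics_eoi).
 */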
static struct irq_chip wsp_ics_msi = {
	.name			= "WSP ICS MSI",
	.irq_startup		= wsp_ics_msi_startup,
	.irq_mask		= wsp_ics_msi_mask_irq,
	.irq_unmask		= wsp_ics_msi_unmask_irq,
	.irq_eoi		= wsp_ics_eoi,
	.irq_set_affinity	= wsp_chip_set_affinity,
};
void wsp_ics_set_msi_chip(unsigned int irq)
void wsp_ics_set_std_chip(unsigned int irq)
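
/*
 * These two helpers presumably switch an interrupt between the MSI and
 * the standard chip, e.g. via irq_set_chip(irq, &wsp_ics_msi) and
 * irq_set_chip(irq, &wsp_irq_chip) respectively, for use by the
 * platform MSI code when MSIs are set up or torn down.
 */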