#include <linux/bitops.h>
#include <linux/slab.h>

#define MIPS_CORE_IRQ_LINES 8
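
/*
 * Helpers that record which Linux irq is wired to each CIU line/bit
 * pair in octeon_irq_ciu_to_irq[][], so the dispatch loops further
 * down can translate a summary-register bit straight into an irq.
 */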
static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
                                       int gpio_line,
                                       struct irq_chip *chip,
                                       irq_flow_handler_t handler)
        irq_set_chip_and_handler(irq, chip, handler);
        octeon_irq_ciu_to_irq[line][bit] = irq;
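
/*
 * The cd.p/cd.s accesses used throughout this file assume the per-irq
 * chip data packs the CIU coordinates into a pointer-sized union,
 * roughly along these lines (a sketch; the exact field widths are an
 * assumption, not taken from this excerpt):
 *
 *      union octeon_ciu_chip_data {
 *              void *p;
 *              unsigned long l;
 *              struct {
 *                      unsigned long line:6;
 *                      unsigned long bit:6;
 *                      unsigned long gpio_line:6;
 *              } s;
 *      };
 */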

static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
                                         int irq, int line, int bit)
        irq_domain_associate(domain, irq, line << 6 | bit);

static int octeon_coreid_for_cpu(int cpu)
        return cvmx_get_core_num();

static int octeon_cpu_for_coreid(int coreid)
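
/*
 * The eight MIPS core interrupt lines are driven through the CP0
 * Status/Cause IM bits: ack clears the bit, eoi sets it again, and
 * enable/disable changes are applied under the bus lock and then
 * broadcast to every core in bus_sync_unlock.
 */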
static void octeon_irq_core_ack(struct irq_data *data)
        unsigned int bit = cd->bit;
        clear_c0_status(0x100 << bit);
        clear_c0_cause(0x100 << bit);

static void octeon_irq_core_eoi(struct irq_data *data)
        set_c0_status(0x100 << cd->bit);

static void octeon_irq_core_set_enable_local(void *arg)
        unsigned int mask = 0x100 << cd->bit;
        clear_c0_status(mask);

static void octeon_irq_core_disable(struct irq_data *data)

static void octeon_irq_core_enable(struct irq_data *data)

static void octeon_irq_core_bus_lock(struct irq_data *data)

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
        on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

static struct irq_chip octeon_irq_chip_core = {
        .irq_enable = octeon_irq_core_enable,
        .irq_disable = octeon_irq_core_disable,
        .irq_ack = octeon_irq_core_ack,
        .irq_eoi = octeon_irq_core_eoi,
        .irq_bus_lock = octeon_irq_core_bus_lock,
        .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
        .irq_cpu_online = octeon_irq_core_eoi,
        .irq_cpu_offline = octeon_irq_core_ack,
};
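
/* Register the MIPS_CORE_IRQ_LINES core interrupts with the chip above. */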
static void __init octeon_irq_init_core(void)
        cd = &octeon_irq_core_chip_data[i];
        irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
                                 handle_percpu_irq);
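
/*
 * Pick the CPU that should take the interrupt next: walk the affinity
 * mask round-robin when it contains more than one CPU, otherwise use
 * the single CPU in the mask.
 */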
static int next_cpu_for_irq(struct irq_data *data)
        cpu = cpumask_next(cpu, data->affinity);
        if (cpu >= nr_cpu_ids) {
        } else if (weight == 1) {
                cpu = cpumask_first(data->affinity);
        }
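
/*
 * The classic CIU exposes one read-modify-write enable register per
 * core and per line (CIU0/CIU1), so a per-cpu mirror of the enable
 * bits is kept and every update is serialized with the per-cpu
 * octeon_irq_ciu_spinlock.
 */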
static void octeon_irq_ciu_enable(struct irq_data *data)
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        cd.p = irq_data_get_irq_chip_data(data);
        if (cd.s.line == 0) {
        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

static void octeon_irq_ciu_enable_local(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        if (cd.s.line == 0) {

static void octeon_irq_ciu_disable_local(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        if (cd.s.line == 0) {

static void octeon_irq_ciu_disable_all(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

static void octeon_irq_ciu_enable_all(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
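
/*
 * Newer parts provide separate per-bit set/clear enable registers
 * (indexed as coreid * 2 for line 0 and coreid * 2 + 1 for line 1), so
 * the "_v2" variants below can enable or disable a single source with
 * one CSR write and need neither the mirror nor the spinlock.
 */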
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
        int cpu = next_cpu_for_irq(data);
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = octeon_coreid_for_cpu(cpu) * 2;
        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = cvmx_get_core_num() * 2;
        int index = cvmx_get_core_num() * 2 + 1;

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = cvmx_get_core_num() * 2;
        int index = cvmx_get_core_num() * 2 + 1;

static void octeon_irq_ciu_ack(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = cvmx_get_core_num() * 2;

static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = octeon_coreid_for_cpu(cpu) * 2;
        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        if (cd.s.line == 0) {
        int index = octeon_coreid_for_cpu(cpu) * 2;
        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
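
/*
 * GPIO interrupts occupy bits 16-31 of CIU line 0 (see
 * octeon_irq_ciu_xlat below).  Before such a line is enabled, the
 * trigger type recorded in the irq_data is translated into the GPIO
 * block's edge/level and polarity configuration.
 */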
static void octeon_irq_gpio_setup(struct irq_data *data)
        u32 t = irqd_get_trigger_type(data);
        cd.p = irq_data_get_irq_chip_data(data);

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
        octeon_irq_gpio_setup(data);
        octeon_irq_ciu_enable_v2(data);

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
        octeon_irq_gpio_setup(data);
        octeon_irq_ciu_enable(data);

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
        irqd_set_trigger_type(data, t);
        octeon_irq_gpio_setup(data);

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        octeon_irq_ciu_disable_all_v2(data);

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        octeon_irq_ciu_disable_all(data);

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.gpio_line);

static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
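
/*
 * When a CPU goes offline, any CIU irq that would otherwise be left
 * pointing only at that CPU is retargeted to the first online CPU.
 */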
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
        if (cpumask_weight(data->affinity) > 1) {
                cpumask_copy(&new_affinity, data->affinity);
                cpumask_clear_cpu(cpu, &new_affinity);
        }
        cpumask_clear(&new_affinity);
        cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                                       const struct cpumask *dest, bool force)
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        cd.p = irq_data_get_irq_chip_data(data);
        if (cpumask_weight(dest) != 1)
        int coreid = octeon_coreid_for_cpu(cpu);
        lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                                          const struct cpumask *dest,
                                          bool force)
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << cd.s.bit;
        if (cd.s.line == 0) {
        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
        int index = octeon_coreid_for_cpu(cpu) * 2;
        unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
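
/*
 * irq_chip definitions: plain CIU, mailbox (used for IPIs), GPIO and
 * watchdog flavours, each in a classic and a "_v2" (per-bit set/clear)
 * form.
 */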
static struct irq_chip octeon_irq_chip_ciu_v2 = {
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_ack,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
};

static struct irq_chip octeon_irq_chip_ciu = {
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_ack = octeon_irq_ciu_ack,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable,
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
};

static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
        .irq_enable = octeon_irq_ciu_enable_all_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_disable_local_v2,
        .irq_eoi = octeon_irq_ciu_enable_local_v2,
        .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
        .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
        .irq_enable = octeon_irq_ciu_enable_all,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_ack = octeon_irq_ciu_disable_local,
        .irq_eoi = octeon_irq_ciu_enable_local,
        .irq_cpu_online = octeon_irq_ciu_enable_local,
        .irq_cpu_offline = octeon_irq_ciu_disable_local,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
        .irq_enable = octeon_irq_ciu_enable_gpio_v2,
        .irq_disable = octeon_irq_ciu_disable_gpio_v2,
        .irq_ack = octeon_irq_ciu_gpio_ack,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
        .irq_enable = octeon_irq_ciu_enable_gpio,
        .irq_disable = octeon_irq_ciu_disable_gpio,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable,
        .irq_ack = octeon_irq_ciu_gpio_ack,
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
};

static void octeon_irq_ciu_wd_enable(struct irq_data *data)
        int cpu = octeon_cpu_for_coreid(coreid);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
        int cpu = octeon_cpu_for_coreid(coreid);

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
        .irq_enable = octeon_irq_ciu1_wd_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
        .irq_enable = octeon_irq_ciu_wd_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
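
/*
 * Device-tree glue: the xlat callbacks turn a (ciu, bit) interrupt
 * specifier into hwirq = (ciu << 6) | bit, and the map callbacks
 * install the right chip and flow handler (edge vs. level) for that
 * hwirq.
 */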
static int octeon_irq_gpio_xlat(struct irq_domain *d,
                                struct device_node *node,
                                const u32 *intspec,
                                unsigned int intsize,
                                unsigned long *out_hwirq,
                                unsigned int *out_type)
        trigger = intspec[1];
        pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
               node->name, trigger);

static int octeon_irq_ciu_xlat(struct irq_domain *d,
                               struct device_node *node,
                               const u32 *intspec,
                               unsigned int intsize,
                               unsigned long *out_hwirq,
                               unsigned int *out_type)
        unsigned int ciu, bit;
        if (ciu > 1 || bit > 63)
                return -EINVAL;
        if (ciu == 0 && bit >= 16 && bit < 32)
                return -EINVAL;
        *out_hwirq = (ciu << 6) | bit;

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
        if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
                return true;
        WARN_ONCE(true, "virq out of range %u.\n", virq);
        return false;

static int octeon_irq_ciu_map(struct irq_domain *d,
                              unsigned int virq, irq_hw_number_t hw)
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;

        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;

        if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;

        if (octeon_irq_ciu_is_edge(line, bit))
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                           octeon_irq_ciu_chip,
                                           handle_edge_irq);
        else
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                           octeon_irq_ciu_chip,
                                           handle_level_irq);

static int octeon_irq_gpio_map_common(struct irq_domain *d,
                                      unsigned int virq, irq_hw_number_t hw,
                                      int line_limit, struct irq_chip *chip)
        unsigned int line, bit;

        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;

        if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;

        octeon_irq_set_ciu_mapping(virq, line, bit, hw,
                                   chip, octeon_irq_handle_gpio);

static int octeon_irq_gpio_map(struct irq_domain *d,
                               unsigned int virq, irq_hw_number_t hw)
        return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
        .map = octeon_irq_ciu_map,
        .xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
        .map = octeon_irq_gpio_map,
        .xlate = octeon_irq_gpio_xlat,
};
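
/*
 * Low-level dispatch for the CIU-fed core interrupt lines: read the
 * summary register for this core, find the highest set bit with
 * fls64(), and hand the mapped irq to the generic IRQ layer.
 */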
static void octeon_irq_ip2_ciu(void)
        const unsigned long core_id = cvmx_get_core_num();
        int bit = fls64(ciu_sum) - 1;
        int irq = octeon_irq_ciu_to_irq[0][bit];

static void octeon_irq_ip3_ciu(void)
        int bit = fls64(ciu_sum) - 1;
        int irq = octeon_irq_ciu_to_irq[1][bit];

static bool octeon_irq_use_ip4;

static void __cpuinit octeon_irq_local_enable_ip4(void *arg)

static void octeon_irq_ip4_mask(void)

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

        octeon_irq_use_ip4 = true;
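
/*
 * Per-CPU initialisation: each core starts with every CIU enable bit
 * cleared (the CIU2 variant walks the whole block of enable CSRs and
 * writes zero), after which octeon_irq_percpu_enable() unmasks the CPU
 * interrupt lines that the CIU feeds.
 */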
static void __cpuinit octeon_irq_percpu_enable(void)

static void __cpuinit octeon_irq_init_ciu_percpu(void)
        int coreid = cvmx_get_core_num();

static void octeon_irq_init_ciu2_percpu(void)
        int coreid = cvmx_get_core_num();
        for (regx = 0; regx <= 0x8000; regx += 0x1000) {
                for (ipx = 0; ipx <= 0x400; ipx += 0x200)
                        cvmx_write_csr(base + regx + ipx, 0);
        }

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
        octeon_irq_init_ciu_percpu();
        octeon_irq_percpu_enable();

static void octeon_irq_setup_secondary_ciu2(void)
        octeon_irq_init_ciu2_percpu();
        octeon_irq_percpu_enable();
        if (octeon_irq_use_ip4)

static void __init octeon_irq_init_ciu(void)
        octeon_irq_init_ciu_percpu();
        octeon_irq_ip2 = octeon_irq_ip2_ciu;
        octeon_irq_ip3 = octeon_irq_ip3_ciu;

        if (/* models with per-bit set/clear enable registers */) {
                chip = &octeon_irq_chip_ciu_v2;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
        } else {
                chip = &octeon_irq_chip_ciu;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
        }
        octeon_irq_ciu_chip = chip;
        octeon_irq_ip4 = octeon_irq_ip4_mask;

        octeon_irq_init_core();

        of_node_put(gpio_node);
        pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
        pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

        of_node_put(ciu_node);
        panic("Cannot find device node for cavium,octeon-3860-ciu.");

        /* Force mappings for the fixed on-chip interrupt sources. */
        for (i = 0; i < 16; i++)
        for (i = 0; i < 4; i++)
        for (i = 0; i < 4; i++)
        for (i = 0; i < 4; i++)
        for (i = 0; i < 16; i++)
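
/*
 * CIU2 (the interrupt controller on newer OCTEON models) replaces the
 * two flat summary registers with a two-level hierarchy: a per-core
 * summary selects a line, and a per-line source register selects the
 * bit within it (see octeon_irq_ciu2() below).
 */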
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable(struct irq_data *data)
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
        int coreid = cvmx_get_core_num();
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
        int coreid = cvmx_get_core_num();
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_ack(struct irq_data *data)
        int coreid = cvmx_get_core_num();
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
        int coreid = cvmx_get_core_num();
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
        int coreid = cvmx_get_core_num();
        cvmx_write_csr(en_addr, mask);

static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
                                        const struct cpumask *dest,
                                        bool force)
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << cd.s.bit;
        cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
        octeon_irq_gpio_setup(data);
        octeon_irq_ciu2_enable(data);

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
        cd.p = irq_data_get_irq_chip_data(data);
        octeon_irq_ciu2_disable_all(data);

static struct irq_chip octeon_irq_chip_ciu2 = {
        .irq_enable = octeon_irq_ciu2_enable,
        .irq_disable = octeon_irq_ciu2_disable_all,
        .irq_ack = octeon_irq_ciu2_ack,
        .irq_mask = octeon_irq_ciu2_disable_local,
        .irq_unmask = octeon_irq_ciu2_enable,
        .irq_set_affinity = octeon_irq_ciu2_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
        .irq_enable = octeon_irq_ciu2_mbox_enable_all,
        .irq_disable = octeon_irq_ciu2_mbox_disable_all,
        .irq_ack = octeon_irq_ciu2_mbox_disable_local,
        .irq_eoi = octeon_irq_ciu2_mbox_enable_local,
        .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
        .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
        .irq_enable = octeon_irq_ciu2_wd_enable,
        .irq_disable = octeon_irq_ciu2_disable_all,
        .irq_mask = octeon_irq_ciu2_disable_local,
        .irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
        .irq_enable = octeon_irq_ciu2_enable_gpio,
        .irq_disable = octeon_irq_ciu2_disable_gpio,
        .irq_ack = octeon_irq_ciu_gpio_ack,
        .irq_mask = octeon_irq_ciu2_disable_local,
        .irq_unmask = octeon_irq_ciu2_enable,
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
        .irq_set_affinity = octeon_irq_ciu2_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
                                struct device_node *node,
                                const u32 *intspec,
                                unsigned int intsize,
                                unsigned long *out_hwirq,
                                unsigned int *out_type)
        unsigned int ciu, bit;
        if (ciu > 6 || bit > 63)
                return -EINVAL;
        *out_hwirq = (ciu << 6) | bit;

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)

static int octeon_irq_ciu2_map(struct irq_domain *d,
                               unsigned int virq, irq_hw_number_t hw)
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;

        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;

        if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;

        if (octeon_irq_ciu2_is_edge(line, bit))
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                           &octeon_irq_chip_ciu2,
                                           handle_edge_irq);
        else
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                           &octeon_irq_chip_ciu2,
                                           handle_level_irq);

static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
                                    unsigned int virq, irq_hw_number_t hw)
        return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
        .map = octeon_irq_ciu2_map,
        .xlate = octeon_irq_ciu2_xlat,
};

static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
        .map = octeon_irq_ciu2_gpio_map,
        .xlate = octeon_irq_gpio_xlat,
};

static void octeon_irq_ciu2(void)
        const unsigned long core_id = cvmx_get_core_num();
        line = fls64(sum) - 1;
        src = cvmx_read_csr(src_reg);
        bit = fls64(src) - 1;
        irq = octeon_irq_ciu_to_irq[line][bit];

static void octeon_irq_ciu2_mbox(void)
        const unsigned long core_id = cvmx_get_core_num();
        line = fls64(sum) - 1;

static void __init octeon_irq_init_ciu2(void)
        octeon_irq_init_ciu2_percpu();
        octeon_irq_ip2 = octeon_irq_ciu2;
        octeon_irq_ip3 = octeon_irq_ciu2_mbox;
        octeon_irq_ip4 = octeon_irq_ip4_mask;

        octeon_irq_init_core();

        of_node_put(gpio_node);
        pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
        pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

        of_node_put(ciu_node);
        panic("Cannot find device node for cavium,octeon-6880-ciu2.");

        /* Force mappings for the fixed on-chip interrupt sources. */
        for (i = 0; i < 64; i++)
        for (i = 0; i < 32; i++)
        for (i = 0; i < 4; i++)
        for (i = 0; i < 4; i++)
        for (i = 0; i < 4; i++)
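
/*
 * Top-level init and dispatch: arch_init_irq() picks the CIU or CIU2
 * implementation at boot, and plat_irq_dispatch() routes IP2/IP3/IP4
 * from the masked Cause bits to the handler installed above.
 */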
void __init arch_init_irq(void)
        cpumask_clear(irq_default_affinity);
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                octeon_irq_init_ciu2();
        else
                octeon_irq_init_ciu();

asmlinkage void plat_irq_dispatch(void)
        unsigned long cop0_cause;
        unsigned long cop0_status;
        cop0_cause &= cop0_status;
        else if (likely(cop0_cause))

#ifdef CONFIG_HOTPLUG_CPU