#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);
			(u64) local_widget, __pa(sn_irq_info),
			(u64) req_nasid, (u64) req_slice, 0);
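/*
 * irq_chip callbacks for the SN hub interrupt controller; they are
 * wired into the .irq_* initializers further down in this file.
 */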
static unsigned int sn_startup_irq(struct irq_data *data)
static void sn_enable_irq(struct irq_data *data)
static void sn_ack_irq(struct irq_data *data)
	unsigned int irq = data->irq & 0xff;
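	/* Record this vector as in-service in the per-node data area (pda). */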
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
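/*
 * The fragments below belong to the interrupt retargeting path: the
 * kernel first asks the PROM (via the SAL redirect call just below) to
 * move the interrupt; if that fails, it falls back to duplicating the
 * irq info, freeing the old PROM entry, and allocating a new one for
 * the requested nasid/slice.
 */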
	struct sn_irq_info *new_irq_info;
	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info,
				  nasid, slice);
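	/* Redirect succeeded: keep using the existing sn_irq_info. */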
		new_irq_info = sn_irq_info;
	new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
			       GFP_ATOMIC);
	if (new_irq_info == NULL)
	unregister_intr_pda(new_irq_info);
			       new_irq_info, vector,
	register_intr_pda(new_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
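	/*
	 * list_replace_rcu() above swaps the list entries atomically with
	 * respect to RCU readers: a concurrent reader sees either the old
	 * or the new sn_irq_info, so the old entry may only be freed after
	 * an RCU grace period.
	 */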
static int sn_set_affinity_irq(struct irq_data *data,
			       const struct cpumask *mask, bool force)
	unsigned int irq = data->irq;
				 sn_irq_lh[irq], list)
sn_unmask_irq(struct irq_data *data)
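/* The irq_chip methods for SN hub interrupts, built from the callbacks above. */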
	.irq_startup	= sn_startup_irq,
	.irq_shutdown	= sn_shutdown_irq,
	.irq_enable	= sn_enable_irq,
	.irq_disable	= sn_disable_irq,
	.irq_ack	= sn_ack_irq,
	.irq_mask	= sn_mask_irq,
	.irq_unmask	= sn_unmask_irq,
	.irq_set_affinity = sn_set_affinity_irq
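/*
 * Init-time loop over all IRQs; it likely installs the chip above on
 * each descriptor (assumption: the loop body is not shown here).
 */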
	for (i = 0; i < NR_IRQS; i++) {
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
	int irq = sn_irq_info->irq_irq;
	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
		pdacpu(cpu)->sn_first_irq = irq;
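/*
 * register_intr_pda()/unregister_intr_pda() maintain per-CPU bounds
 * (pdacpu(cpu)->sn_first_irq / ->sn_last_irq) on the IRQs targeted at a
 * CPU, so the lost-interrupt scan further down only has to walk that
 * range.
 */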
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
	int irq = sn_irq_info->irq_irq;
	struct sn_irq_info *tmp_irq_info;
	if (pdacpu(cpu)->sn_last_irq == irq) {
		for (i = pdacpu(cpu)->sn_last_irq - 1;
		     i && !foundmatch; i--) {
			list_for_each_entry_rcu(tmp_irq_info,
	if (pdacpu(cpu)->sn_first_irq == irq) {
		for (i = pdacpu(cpu)->sn_first_irq + 1;
		     i < NR_IRQS && !foundmatch; i++) {
			list_for_each_entry_rcu(tmp_irq_info,
	spin_lock(&sn_irq_info_lock);
	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
	spin_unlock(&sn_irq_info_lock);
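	/*
	 * Writers serialize on sn_irq_info_lock; readers walk sn_irq_lh[]
	 * under RCU, which is why the add/delete use the _rcu list
	 * primitives.
	 */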
	register_intr_pda(sn_irq_info);
	struct sn_irq_info *sn_irq_info;
	unregister_intr_pda(sn_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_del_rcu(&sn_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
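	/*
	 * Teardown mirrors setup: unlink the entry under the writer lock;
	 * once sn_irq_lh[irq] is empty, the per-IRQ resources can be
	 * released (assumption: the body of the if () above does that).
	 */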
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
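/*
 * sn_check_intr(): lost-interrupt workaround.  If the bridge reports an
 * interrupt as sent but it is neither pending in the CPU IRR nor marked
 * in-service, it may have been lost, so it is forced to be re-sent via
 * the PCI provider.  A spurious interrupt is possible; a genuinely lost
 * one is never missed.
 */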
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!ia64_get_irr(irq_to_vector(irq))) {
				sn_call_force_intr_provider(sn_irq_info);
	struct sn_irq_info *sn_irq_info;
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
			sn_check_intr(i, sn_irq_info);
		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
	for (i = 0; i < NR_IRQS; i++) {
			panic("SN PCI INIT: Failed IRQ memory allocation\n");
		INIT_LIST_HEAD(sn_irq_lh[i]);
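		/*
		 * Each IRQ gets its own list head; sn_irq_info entries are
		 * chained onto these lists by the fixup path above.
		 */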