#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
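/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets below by
 * physical address, using bypass (ASI_PHYS_USE_EC) loads and stores
 * only.
 */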
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket, __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket, __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket, __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket, __irq)),
			       "i" (ASI_PHYS_USE_EC));
}
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
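/* Each cpu's trap_block[] entry carries the physical address of its
 * pending-IVEC worklist; handler_irq() below swaps it out atomically
 * and walks the chain.
 */

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);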
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}
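/* Slot 0 of irq_table[] is intentionally left unused so that a zero
 * return from irq_alloc() can signal exhaustion to callers.
 */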
#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);
	irq_table[irq].in_use = 0;
	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID) {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_JBUS;
		} else {
			/* Safari encodes the target as agent and node ids. */
			unsigned int a = cpuid & 0x1f;
			unsigned int n = (cpuid >> 5) & 0x1f;

			tid = ((a << IMAP_AID_SHIFT) |
			       (n << IMAP_NID_SHIFT));
			tid &= (IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		}
	} else {
		tid = cpuid << IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	}

	return tid;
}
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) :
			cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif
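/* irq_choose_cpu() picks the target for the chip methods below: on
 * SMP it honors the requested affinity mask, on UP everything lands
 * on the one online processor.
 */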
static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}
static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}
/* Don't do anything.  Leaving the interrupt in the sent state means
 * the next ->irq_enable() call will hit the ICLR register and reset
 * the state machine.
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}
static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}
static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}
static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}
static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
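/* Virtual (cookie-based) interrupts are addressed by the full
 * (dev_handle, dev_ino) pair rather than a flat sysino, which is why
 * each sun4v_virq_* operation re-reads both from irq_table[].
 */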
static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
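/* These chip structures are attached to an irq by the build_irq() and
 * sun4v_build_*() constructors below via
 * irq_set_chip_and_handler_name(..., handle_fasteoi_irq, "IVEC").
 */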
static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data =
		irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1,
				  handler_data->arg2);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
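/* Illustrative sketch only: a sun4v device driver obtains devhandle
 * and devino from the machine description and then simply does
 *
 *	unsigned int irq = sun4v_build_irq(devhandle, devino);
 */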
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the bucket is its physical
	 * address, which kmemleak cannot see.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq,
				      handle_fasteoi_irq, "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* Do not let request_irq() enable the interrupt; the LDC
	 * channel startup sequence enables it explicitly.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}
void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", ino, irq);
}
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
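/* The worklist head is snapshotted and cleared with PSTATE_IE off, so
 * a vector arriving while the chain is being walked starts a fresh
 * list instead of racing with this one.
 */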
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		/* Switch onto the per-cpu softirq stack, run the
		 * pending softirqs, then switch back.
		 */
		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif
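/* Called while taking a cpu offline: every active interrupt that is
 * not per-cpu gets its affinity re-evaluated so nothing remains
 * targeted at the departing processor.
 */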
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* The PROM timer node hangs out at the top level of the
	 * device tree siblings.
	 */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* If the node is absent, the PROM uses a different tick
	 * mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must be mapped already. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* The PROM uses a timer which ticks at IRQ 14; zero both
	 * limit registers just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Eat the pending interrupt packet too. */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
				       unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}
static struct irqaction timer_irq_action = {
	.name = "timer",
};
/* Only invoked on the boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* Clear any IRQs pending in the soft interrupt registers; a
	 * spurious one could be left around from the PROM timer which
	 * we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is set up, it is safe to receive
	 * IRQ vector traps, so enable interrupts in %pstate.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* no outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}