22 #include <linux/kernel.h>
24 #include <linux/sched.h>
28 #include <asm/tlbflush.h>
29 #include <asm/bitops.h>
30 #include <asm/processor.h>
32 #include <asm/exceptions.h>
33 #include <asm/hardirq.h>
35 #include <asm/mmu_context.h>
36 #include <asm/thread_info.h>
41 #ifdef CONFIG_HOTPLUG_CPU
42 #include <asm/cacheflush.h>
/* Forward declarations of the CPU-hotplug sleep/wake helpers defined
 * near the end of this file (they are only referenced when
 * CONFIG_HOTPLUG_CPU is enabled — see the #ifdef just above). */
46 static void run_sleep_cpu(
unsigned int cpu);
47 static void run_wakeup_cpu(
unsigned int cpu);
/*
 * Dprintk() - SMP debug logging macro.
 *
 * When DEBUG_SMP is defined, messages are emitted at KERN_DEBUG level via
 * printk(); otherwise no_printk() discards them while still type-checking
 * the format string and arguments.
 *
 * As extracted, both definitions appeared back to back with no
 * conditional, which is a macro redefinition; the #ifdef/#else/#endif
 * guard restores the intended either/or selection.
 */
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
/* Timeout for the NMI-based cross-call; 0 presumably means "wait
 * forever" — the waiting loops below spin on cpumask emptiness.
 * NOTE(review): confirm against the consumer of this constant. */
62 #define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
/* NOTE(review): fragment — this zero-length trailing array appears to be
 * the cache-line padding member of the nmi_call_data structure whose
 * surrounding declaration lines were lost in extraction. */
73 char size_alignment[0]
/*
 * Forward declarations for the SMP bring-up helpers and the IPI
 * irq_chip callbacks defined later in this file.
 */
/* Boot a secondary CPU identified by physical ID. */
100 static int do_boot_cpu(
int);
/* Print clock/bogomips information for one CPU. */
101 static void smp_show_cpu_info(
int cpu_id);
103 static void smp_online(
void);
104 static void smp_store_cpu_info(
int);
105 static void smp_cpu_init(
void);
106 static void smp_tune_scheduling(
void);
/* Set up the inter-processor interrupt vectors. */
108 static void init_ipi(
void);
/* Raw mask/unmask of an IPI's interrupt-control register. */
113 static void mn10300_ipi_disable(
unsigned int irq);
114 static void mn10300_ipi_enable(
unsigned int irq);
/* irq_chip-interface wrappers around the raw helpers above. */
115 static void mn10300_ipi_chip_disable(
struct irq_data *
d);
116 static void mn10300_ipi_chip_enable(
struct irq_data *
d);
117 static void mn10300_ipi_ack(
struct irq_data *
d);
118 static void mn10300_ipi_nop(
struct irq_data *
d);
/*
 * irq_chip describing the MN10300 IPI lines; wires the genirq callbacks
 * to the local helpers.  NOTE(review): sampled extraction — the .name
 * initializer and closing brace of this initializer were dropped.
 */
120 static struct irq_chip mn10300_ipi_type = {
122 .irq_disable = mn10300_ipi_chip_disable,
123 .irq_enable = mn10300_ipi_chip_enable,
124 .irq_ack = mn10300_ipi_ack,
125 .irq_eoi = mn10300_ipi_nop
/* irqaction descriptors for the three ordinary (non-NMI) IPIs; only the
 * .name initializers are visible here — handler fields were dropped by
 * the extraction. */
131 static struct irqaction reschedule_ipi = {
133 .name =
"smp reschedule IPI"
135 static struct irqaction call_function_ipi = {
137 .name =
"smp call function IPI"
/* The local-timer IPI only exists when generic clockevents are absent
 * or broadcast is in use. */
140 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
142 static struct irqaction local_timer_ipi = {
145 .name =
"smp local timer IPI"
/*
 * init_ipi() - register and unmask the inter-processor interrupts.
 *
 * For each ordinary IPI (reschedule, single-target call-function, and —
 * when configured — local timer) this installs the mn10300_ipi_type
 * chip/handler pair, registers the corresponding irqaction, and unmasks
 * the line.  NMI-class IPIs are programmed directly into their GxICR
 * interrupt-control registers with interrupts disabled; the read-back
 * of GxICR into tmp16 is presumably a flush of the register write —
 * TODO confirm against the hardware manual.
 *
 * NOTE(review): sampled extraction — handler arguments, brace lines and
 * several statements are missing from this view.
 */
152 static void init_ipi(
void)
158 irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
160 setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
162 mn10300_ipi_enable(RESCHEDULE_IPI);
165 irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
167 setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
169 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
172 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
173 defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
174 irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
176 setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
178 mn10300_ipi_enable(LOCAL_TIMER_IPI);
/* Cache-flush IPI is routed through the low-level handler when the
 * cache is enabled. */
181 #ifdef CONFIG_MN10300_CACHE_ENABLED
183 flags = arch_local_cli_save();
185 mn10300_low_ipi_handler);
186 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
187 mn10300_ipi_enable(FLUSH_CACHE_IPI);
/* Cross-call NMI IPI: enable as NMI class directly in the ICR. */
192 flags = arch_local_cli_save();
193 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
194 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
198 flags = arch_local_cli_save();
200 mn10300_low_ipi_handler);
/*
 * mn10300_ipi_shutdown() - mask an IPI, preserving only its level bits
 * and leaving detection mode set.  Runs with local interrupts disabled.
 */
208 static void mn10300_ipi_shutdown(
unsigned int irq)
213 flags = arch_local_cli_save();
216 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
/*
 * mn10300_ipi_enable() - unmask an IPI by setting GxICR_ENABLE while
 * preserving the programmed priority level.  Runs with local interrupts
 * disabled; tmp is presumably a prior read of GxICR(irq) lost in
 * extraction — TODO confirm.
 */
226 static void mn10300_ipi_enable(
unsigned int irq)
231 flags = arch_local_cli_save();
234 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
240 static void mn10300_ipi_chip_enable(
struct irq_data *
d)
242 mn10300_ipi_enable(d->
irq);
/*
 * mn10300_ipi_disable() - mask an IPI by clearing everything except the
 * priority-level bits in its GxICR register.  Runs with local
 * interrupts disabled; tmp is presumably a prior read of GxICR(irq)
 * lost in extraction — TODO confirm.
 */
249 static void mn10300_ipi_disable(
unsigned int irq)
254 flags = arch_local_cli_save();
257 GxICR(irq) = tmp & GxICR_LEVEL;
263 static void mn10300_ipi_chip_disable(
struct irq_data *
d)
265 mn10300_ipi_disable(d->
irq);
/*
 * mn10300_ipi_ack() - acknowledge an IPI.
 *
 * Writes GxICR_DETECT through the byte-wide register view (GxICR_u8) to
 * clear the detect/request state, with local interrupts disabled.
 * NOTE(review): sampled extraction — brace lines and the matching
 * arch_local_irq_restore() are missing from this view.
 */
276 static void mn10300_ipi_ack(
struct irq_data *
d)
278 unsigned int irq = d->
irq;
282 flags = arch_local_cli_save();
283 GxICR_u8(irq) = GxICR_DETECT;
/**
 * mn10300_ipi_nop - dummy irq_chip callback for IPI lines
 * @d: genirq data for the IPI (unused)
 *
 * Installed as the .irq_eoi hook of mn10300_ipi_type, which needs no
 * end-of-interrupt action.  Intentionally empty; the extracted span had
 * lost the (empty) body braces.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}
/*
 * NOTE(review): the following span is a sampled extraction of several
 * functions (the IPI send path, the NMI cross-call, and the stop/IRQ
 * handler paths); function headers and most statements are missing.
 * Comments below describe only what is visible.
 */
/* Core of send_IPI_mask(): for each target CPU, raise the request bit
 * in its cross-CPU interrupt-control register.  The trailing read-back
 * of CROSS_GxICR is presumably a posting flush — TODO confirm. */
310 for (i = 0; i <
NR_CPUS; i++) {
313 tmp = CROSS_GxICR(irq, i);
314 CROSS_GxICR(irq, i) =
315 tmp | GxICR_REQUEST | GxICR_DETECT;
316 tmp = CROSS_GxICR(irq, i);
/* Broadcast helper: forwards a computed cpumask to send_IPI_mask(). */
346 send_IPI_mask(&cpumask, irq);
/* arch_send_call_function_single_ipi(): single-target cross call. */
357 send_IPI_mask(
cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
/* smp_send_reschedule(): kick one CPU's scheduler. */
366 send_IPI_mask(
cpumask_of(cpu), RESCHEDULE_IPI);
/* smp_nmi_call_function(): publish the call descriptor, then spin until
 * all targeted CPUs have started and (if waiting) finished, under
 * smp_nmi_call_lock. */
402 nmi_call_data = &
data;
430 while (!cpumask_empty(&data.
started))
433 while (!cpumask_empty(&data.
finished))
437 spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
/* Stop path: CPUs poll this flag; GDB stub gets special handling. */
463 static volatile int stopflag;
466 #ifdef CONFIG_GDBSTUB
473 flags = arch_local_cli_save();
/* Call-function-single IPI handler defers to the generic SMP core. */
514 generic_smp_call_function_single_interrupt();
/* Local-timer IPI handler exists only without generic clockevents (or
 * with broadcast). */
541 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
542 defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
559 for (i = 0; i <
NR_CPUS; i++) {
/*
 * smp_cpu_init() - per-CPU interrupt setup run on a freshly started CPU.
 *
 * Programs and unmasks the ICRs for each IPI at its configured priority
 * level, shuts down the boot IRQ now that the CPU is up, and enables
 * the NMI-class cross-call IPI (the tmp16 read-back is presumably a
 * posting flush — TODO confirm).
 *
 * NOTE(review): sampled extraction — braces and many statements are
 * missing throughout this span.
 */
571 static void __init smp_cpu_init(
void)
593 GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
594 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
596 GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
597 mn10300_ipi_enable(LOCAL_TIMER_IPI);
599 GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
600 mn10300_ipi_enable(RESCHEDULE_IPI);
602 #ifdef CONFIG_MN10300_CACHE_ENABLED
603 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
604 mn10300_ipi_enable(FLUSH_CACHE_IPI);
607 mn10300_ipi_shutdown(SMP_BOOT_IRQ);
610 flags = arch_local_cli_save();
611 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
612 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
/* Boot-time ICR reset: park every interrupt at level 6 with detection
 * mode set; the debugger NMI IPI is then enabled if configured. */
635 for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
636 GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
638 #ifdef CONFIG_KERNEL_DEBUGGER
644 flags = arch_local_cli_save();
645 GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
646 tmp16 = GxICR(DEBUGGER_NMI_IPI);
667 #ifdef CONFIG_GENERIC_CLOCKEVENTS
/* smp_prepare_cpus(): record boot-CPU data, then probe each physical
 * CPU ID up to NR_CPUS (bounded by max_cpus) and boot it. */
685 smp_store_cpu_info(0);
686 smp_tune_scheduling();
697 for (phy_id = 0; phy_id <
NR_CPUS; phy_id++) {
699 if (max_cpus <= cpucount + 1)
704 smp_show_cpu_info(phy_id);
/* Per-CPU info capture and scheduler tuning; bodies not visible here. */
717 static void __init smp_store_cpu_info(
int cpu)
731 static void __init smp_tune_scheduling(
void)
/*
 * do_boot_cpu() fragment: start one secondary CPU.
 *
 * Forks an idle task (panicking on failure), records its stack pointer
 * for the trampoline, sends the SMP boot IPI to the target's physical
 * ID, then polls (bounded by timeouts) for (a) the boot IPI request bit
 * to clear and (b) the CPU to appear in cpu_callin_map.  On any failure
 * the CPU is scrubbed from the callout/callin/initialized masks.
 *
 * NOTE(review): sampled extraction — the function header, loop bodies
 * and error-path printks are missing from this view.
 */
745 unsigned long send_status, callin_status;
748 send_status = GxICR_REQUEST;
758 panic(
"Failed fork for CPU#%d.", cpu_id);
/* start_stack is indexed from CPU 1 (boot CPU needs no slot). */
763 start_stack[cpu_id - 1] = idle->
thread.sp;
768 send_IPI_mask(
cpumask_of(phy_id), SMP_BOOT_IRQ);
770 Dprintk(
"Waiting for send to finish...\n");
/* Poll the cross-ICR until the request bit drops (max ~100 tries). */
776 CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
777 }
while (send_status == GxICR_REQUEST && timeout++ < 100);
779 Dprintk(
"Waiting for cpu_callin_map.\n");
781 if (send_status == 0) {
783 cpumask_set_cpu(cpu_id, &cpu_callout_map);
791 }
while (callin_status == 0 && timeout++ < 5000);
793 if (callin_status == 0)
/* Failure: undo all bookkeeping for this CPU. */
799 if (send_status == GxICR_REQUEST || callin_status == 0) {
800 cpumask_clear_cpu(cpu_id, &cpu_callout_map);
801 cpumask_clear_cpu(cpu_id, &cpu_callin_map);
802 cpumask_clear_cpu(cpu_id, &cpu_initialized);
/* smp_show_cpu_info(): report ioclk speed and bogomips for one CPU. */
813 static void __init smp_show_cpu_info(
int cpu)
818 "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
/* smp_callin() fragment: secondary CPU waits for its callout, warns if
 * none arrives, optionally calibrates delay, stores its CPU info, and
 * marks itself called-in. */
831 unsigned long timeout;
841 Dprintk(
"CPU#%d waiting for CALLOUT\n", cpu);
852 "BUG: CPU#%d started up but did not get a callout!\n",
857 #ifdef CONFIG_CALIBRATE_DELAY
862 smp_store_cpu_info(cpu);
865 cpumask_set_cpu(cpu, &cpu_callin_map);
/* smp_online(): final bring-up notification for a starting CPU. */
871 static void __init smp_online(
void)
877 notify_cpu_starting(cpu);
/* smp_prepare_boot_cpu() fragment: boot CPU is trivially called out
 * and called in. */
902 cpumask_set_cpu(0, &cpu_callout_map);
903 cpumask_set_cpu(0, &cpu_callin_map);
/* __cpu_up() fragment: commence the CPU and poll (bounded) for it to
 * come online; hotplug builds wake a sleeping CPU instead of booting. */
929 #ifdef CONFIG_HOTPLUG_CPU
936 cpumask_set_cpu(cpu, &smp_commenced_mask);
939 for (timeout = 0 ; timeout < 5000 ; timeout++) {
964 #ifdef CONFIG_HOTPLUG_CPU
/* topology_init(): register each CPU with the sysfs topology layer,
 * logging (not aborting on) per-CPU registration failures. */
968 static int __init topology_init(
void)
976 "topology_init: register_cpu %d failed (%d)\n",
/* __cpu_disable() fragment: detach the dying CPU from the active mm's
 * cpumask. */
991 cpumask_clear_cpu(cpu, &mm_cpumask(
current->active_mm));
/*
 * Hotplug cache helpers: inline-asm manipulation of the CHCTR cache
 * control register (disable waits for ICBUSY/DCBUSY to clear;
 * enable/invalidate set the corresponding CHCTR bits).
 * NOTE(review): sampled extraction — most asm template lines and the
 * operand lists are missing; do not infer exact semantics from what
 * remains.
 */
1003 #ifdef CONFIG_MN10300_CACHE_ENABLED
1004 static inline void hotplug_cpu_disable_cache(
void)
1011 "1: movhu (%1),%0 \n"
1016 "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
1017 "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
1021 static inline void hotplug_cpu_enable_cache(
void)
1030 "i"(CHCTR_ICEN | CHCTR_DCEN)
1034 static inline void hotplug_cpu_invalidate_cache(
void)
1043 "i"(CHCTR_ICINV | CHCTR_DCINV)
/* No-op fallbacks when the cache is not enabled. */
1048 #define hotplug_cpu_disable_cache() do {} while (0)
1049 #define hotplug_cpu_enable_cache() do {} while (0)
1050 #define hotplug_cpu_invalidate_cache() do {} while (0)
/*
 * hotplug_cpu_nmi_call_function() fragment: run @func on the CPUs in
 * @cpumask via the NMI cross-call IPI.
 *
 * Fills in the static nmi_call_func_mask_data descriptor (func, info,
 * started/finished masks, wait flag), publishes it as nmi_call_data
 * under smp_nmi_call_lock, sends CALL_FUNCTION_NMI_IPI, then spins
 * until the started (and, presumably when @wait is set, finished) masks
 * empty out — TODO confirm the wait condition; intermediate lines are
 * missing.  `start` takes the descriptor's address, presumably for a
 * cache flush of the shared data — confirm against the dropped lines.
 */
1064 static int hotplug_cpu_nmi_call_function(
cpumask_t cpumask,
1076 start = (
unsigned long)&nmi_call_func_mask_data;
1079 nmi_call_func_mask_data.func =
func;
1080 nmi_call_func_mask_data.info =
info;
1081 nmi_call_func_mask_data.started =
cpumask;
1082 nmi_call_func_mask_data.wait =
wait;
1084 nmi_call_func_mask_data.finished =
cpumask;
1086 spin_lock(&smp_nmi_call_lock);
1087 nmi_call_data = &nmi_call_func_mask_data;
1091 send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
1096 }
while (!cpumask_empty(&nmi_call_func_mask_data.started));
1102 }
while (!cpumask_empty(&nmi_call_func_mask_data.finished));
1105 spin_unlock(&smp_nmi_call_lock);
/* restart_wakeup_cpu(): re-announce a woken CPU via cpu_callin_map. */
1109 static void restart_wakeup_cpu(
void)
1113 cpumask_set_cpu(cpu, &cpu_callin_map);
/* prepare_sleep_cpu(): NMI callback run on the CPU about to sleep —
 * disable then invalidate its caches. */
1119 static void prepare_sleep_cpu(
void *
unused)
1124 hotplug_cpu_disable_cache();
1125 hotplug_cpu_invalidate_cache();
/* sleep_cpu(): NMI callback that halts the CPU; on resume it falls
 * through to restart_wakeup_cpu(). */
1129 static void sleep_cpu(
void *
unused)
1140 restart_wakeup_cpu();
/* run_sleep_cpu(): orchestrates putting @cpu to sleep — prepare with
 * wait=1, then issue the (non-waiting, wait=0) sleep call, all with
 * local interrupts disabled. */
1143 static void run_sleep_cpu(
unsigned int cpu)
1145 unsigned long flags;
1149 flags = arch_local_cli_save();
1150 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu,
NULL, 1);
1151 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu,
NULL, 0);
/* wakeup_cpu(): NMI callback on the waking CPU — invalidate then
 * re-enable its caches. */
1156 static void wakeup_cpu(
void)
1158 hotplug_cpu_invalidate_cache();
1159 hotplug_cpu_enable_cache();
/* run_wakeup_cpu(): wake @cpu via a waiting NMI cross-call; this
 * implementation only supports two CPUs (compile-time #error guard). */
1164 static void run_wakeup_cpu(
unsigned int cpu)
1166 unsigned long flags;
1168 flags = arch_local_cli_save();
1176 #error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
1178 hotplug_cpu_nmi_call_function(
cpumask_of(cpu), wakeup_cpu,
NULL, 1);