#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <asm/i8259.h>
#ifdef CONFIG_HOTPLUG_CPU
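/*
 * Lock/unlock helpers serializing CPU hotplug driver operations
 * (e.g. the sysfs cpu probe/release interface).
 */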
void cpu_hotplug_driver_lock(void)

void cpu_hotplug_driver_unlock(void)
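/*
 * smp_callin(): runs on the freshly started AP. It sanity-checks the
 * APIC ID, waits for the boot CPU's CALLOUT, sets up the local APIC
 * and announces that this CPU is alive.
 */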
	phys_id = read_apic_id();
	panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	panic("%s: CPU%d started up but did not get a callout!\n",
	pr_debug("CALLIN, before setup_local_APIC()\n");
	pr_debug("Stack at about %p\n", &cpuid);
	notify_cpu_starting(cpuid);
	boot_init_stack_canary();
305 "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
306 "[node: %d != %d]. Ignoring dependency.\n",
#define link_mask(_m, c1, c2) \
	cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \
	cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
	return topology_sane(c, o, "smt");	/* match_smt(), topoext path */
	return topology_sane(c, o, "smt");	/* match_smt(), legacy path */
	return topology_sane(c, o, "llc");	/* match_llc() */
	return topology_sane(c, o, "mc");	/* match_mc() */
	if (!has_smt && !has_mc) {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
	if ((i == cpu) || (has_smt && match_smt(c, o)))
	if ((i == cpu) || (has_mc && match_llc(c, o)))
	if ((i == cpu) || (has_mc && match_mc(c, o))) {
	if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
	if (cpumask_first(cpu_sibling_mask(i)) == i)
	return cpu_llc_shared_mask(cpu);	/* cpu_coregroup_mask() */
static void impress_friends(void)

	unsigned long bogosum = 0;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(), bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
	pr_debug("Before bogocount - setting activated=1\n");
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	pr_info("Inquiring remote APIC 0x%x...\n", apicid);
	pr_info("... APIC 0x%x %s: ", apicid, names[i]);
	status = safe_apic_wait_icr_idle();
	pr_cont("a previous APIC delivery may have failed\n");
	unsigned long send_status, accept_status = 0;
	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	pr_err("APIC never delivered???\n");
	pr_err("APIC delivery error (%lx)\n", accept_status);
	return (send_status | accept_status);
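/*
 * wakeup_secondary_cpu_via_init(): kick the AP with the classic
 * INIT, INIT deassert, STARTUP (SIPI) sequence; any send or accept
 * error status is accumulated and returned.
 */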
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)

	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;
	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();
	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();
	pr_debug("#startup loops: %d\n", num_starts);
	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
	pr_err("APIC never delivered???\n");
	pr_err("APIC delivery error (%lx)\n", accept_status);
	return (send_status | accept_status);
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	if (node != current_node) {
		if (current_node > (-1))
		pr_info("Booting Node %3d, Processors ", node);
	pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : "");
	pr_info("Booting Node %d Processor %d APIC 0x%x\n",
	volatile u32 *trampoline_status =
	unsigned long boot_error = 0;
	alternatives_enable_smp();
	clear_tsk_thread_flag(idle, TIF_FORK);
	announce_cpu(cpu, apicid);
	pr_debug("Setting warm reset code and vector.\n");
	smpboot_setup_warm_reset_vector(start_ip);
	boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
	pr_debug("Before Callout %d\n", cpu);
	pr_debug("After Callout %d\n", cpu);
	for (timeout = 0; timeout < 50000; timeout++) {
		pr_debug("CPU%d: has booted.\n", cpu);
	if (*trampoline_status == 0xA5A5A5A5)
		pr_err("CPU%d: Stuck ??\n", cpu);
	pr_err("CPU%d: Not responding\n", cpu);
	*trampoline_status = 0;
	smpboot_restore_warm_reset_vector();
	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
	pr_err("%s: bad cpu %d\n", __func__, cpu);
	pr_debug("do_boot_cpu %d Already started\n", cpu);
	__cpu_disable_lazy_restore(cpu);
	err = do_boot_cpu(apicid, cpu, tidle);
	pr_debug("do_boot_cpu failed %d\n", err);
static __init void disable_smp(void)

	smpboot_clear_io_apic_irqs();
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
static int __init smp_sanity_check(unsigned max_cpus)

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");
	pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
	pr_notice("SMP motherboard not detected\n");
	pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
	pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
	pr_err("BIOS bug, local APIC #%d not detected!...\n",
		boot_cpu_physical_apicid);
	pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
	smpboot_clear_io_apic();
	pr_info("SMP mode deactivated\n");
	smpboot_clear_io_apic();
static void __init smp_cpu_index_default(void)

	/* from native_smp_prepare_cpus() */
	smp_cpu_index_default();
	if (smp_sanity_check(max_cpus) < 0) {
	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
			read_apic_id(), boot_cpu_physical_apicid);
	smpboot_setup_io_apic();
	x86_init.timers.setup_percpu_clockev();
#ifdef CONFIG_X86_IO_APIC
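/*
 * prefill_possible_map(): decide how many CPUs may ever be brought
 * online, honouring the possible_cpus= boot parameter, the number of
 * disabled (hot-pluggable) CPUs, NR_CPUS and the max_cpus limit.
 */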
static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)

early_param("possible_cpus", _setup_possible_cpus);

	if (setup_possible_cpus == -1) {
#ifdef CONFIG_HOTPLUG_CPU
		possible += disabled_cpus;
		possible = setup_possible_cpus;
	pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
#ifdef CONFIG_HOTPLUG_CPU
	pr_warn("%d Processors exceeds max_cpus limit of %u\n",
	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
	for (i = 0; i < possible; i++)
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)

	cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
	if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
	cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));

static void __ref remove_cpu_from_maps(int cpu)

void cpu_disable_common(void)

	remove_siblinginfo(cpu);
	remove_cpu_from_maps(cpu);
	cpu_disable_common();
	for (i = 0; i < 10; i++) {
		pr_info("CPU %u is now offline\n", cpu);
	pr_err("CPU %u didn't die...\n", cpu);

void play_dead_common(void)

	reset_lazy_tlbstate();
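/*
 * mwait_play_dead(): park the offlined CPU in the deepest MWAIT
 * C-state advertised by CPUID leaf 5, monitoring a cache line while
 * it waits.
 */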
static inline void mwait_play_dead(void)

	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
		(highest_subcstate - 1);
	__monitor(mwait_ptr, 0, 0);

static inline void hlt_play_dead(void)