#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/debug.h>

static u8 boot_cpu_type;
static u16 boot_cpu_address;
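
/*
 * Low-level SIGP (SIGNAL PROCESSOR) helper: the parameter is passed in
 * register 1 and the sigp instruction (elided here) is issued with the
 * CPU address and order code bound below; the resulting condition code
 * is returned, and cc == 1 means a status word was stored back in reg1.
 */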
        register unsigned int reg1 asm ("1") = parm;

        : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
        if (status && cc == 1)
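
/*
 * Busy-retry wrappers around __pcpu_sigp(): both helpers below reissue
 * the order as long as the target reports a busy condition, backing off
 * in the elided parts of their loops.
 */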
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)

                cc = __pcpu_sigp(addr, order, parm, NULL);

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
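
/*
 * State queries: pcpu_stopped() and pcpu_running() use sigp sense-type
 * orders (in their elided bodies) to ask whether the addressed CPU is
 * stopped or currently executing.
 */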
static inline int pcpu_stopped(struct pcpu *pcpu)

static inline int pcpu_running(struct pcpu *pcpu)

                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
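
/*
 * pcpu_ec_call() records the request in the target's ec_mask and kicks
 * the CPU with a sigp order; the elided continuation of the ?: selects
 * an external-call order for a running CPU and an emergency-signal
 * order for a stopped one.
 */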
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)

        set_bit(ec_bit, &pcpu->ec_mask);
        order = pcpu_running(pcpu) ?

        pcpu_sigp_retry(pcpu, order, 0);
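
/*
 * Lowcore setup for a secondary CPU: a fresh prefix page is allocated
 * for everything but the boot CPU (pcpu_devices[0]), the area beyond
 * the first 512 bytes is cleared, and either an extended save area or
 * per-CPU vdso data is attached; the second pcpu_devices[0] check is
 * part of the error path that frees the allocations again.
 */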
static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)

        if (pcpu != &pcpu_devices[0]) {

        memset((char *) lc + 512, 0, sizeof(*lc) - 512);

                if (!lc->extended_save_area_addr)

        if (vdso_alloc_per_cpu(lc))

        if (pcpu != &pcpu_devices[0]) {

#ifdef CONFIG_HOTPLUG_CPU
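
/*
 * pcpu_free_lowcore() undoes the allocation above: it releases the
 * extended save area or the per-CPU vdso data (selected by elided
 * #ifdefs) and, for everything but the boot CPU, frees the lowcore
 * pages themselves.
 */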
static void pcpu_free_lowcore(struct pcpu *pcpu)

                free_page((unsigned long) lc->extended_save_area_addr);
                lc->extended_save_area_addr = 0;

        vdso_free_per_cpu(pcpu->lowcore);

        if (pcpu != &pcpu_devices[0]) {

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
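
/*
 * pcpu_delegate() hands execution of func over to the target pcpu.  If
 * the target is the CPU we are running on, the function is called
 * directly; otherwise the inline assembly below restarts the target
 * with "sigp restart" and stops the current CPU with "sigp stop",
 * retrying each order while the condition code reports busy.
 */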
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),

        unsigned long source_cpu = stap();

        if (pcpu->address == source_cpu)
295 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
296 " brc 2,0b # busy, try again\n"
297 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
298 " brc 2,1b # busy, try again\n"
299 : :
"d" (pcpu->
address),
"d" (source_cpu),

        pcpu_delegate(&pcpu_devices[0], func, data,

                if (pcpu_devices[cpu].address == address)

        return pcpu_running(pcpu_devices + cpu);
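
/*
 * Hypervisor yield hints: "diag 0,0,0x44" gives up the remainder of the
 * current time slice, while "diag %0,0,0x9c" is a directed yield in
 * favour of the CPU whose address is passed in the register operand.
 */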
                asm volatile("diag 0,0,0x44");

                asm volatile("diag %0,0,0x9c"
                             : : "d" (pcpu_devices[cpu].address));

                asm volatile("diag 0,0,0x44");

                struct pcpu *pcpu = pcpu_devices + cpu;

                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, cpumask);
                if (cpumask_empty(cpumask))

                struct pcpu *pcpu = pcpu_devices + cpu;
                while (!pcpu_stopped(pcpu))
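
/*
 * External interrupt handler fragment: external interrupt code 0x1202
 * is the "external call" interrupt and 0x1201 the emergency signal
 * (matching the panic messages in the setup code further down); the
 * handler dispatches work such as the generic smp_call_function
 * handlers according to the bits set in ec_mask.
 */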
                                  unsigned int param32, unsigned long param64)

        if (ext_code.code == 0x1202)

                generic_smp_call_function_interrupt();

                generic_smp_call_function_single_interrupt();

static void smp_ptlb_callback(void *info)

static void smp_ctl_bit_callback(void *info)

        unsigned long cregs[16];

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
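
/*
 * Dump support (zfcpdump/kdump): smp_get_save_area() fills
 * zfcpdump_save_areas[] with one register save area per CPU.  CPUs
 * beyond NR_CPUS-1 are skipped with a warning, the boot CPU gets
 * special treatment under CONFIG_CRASH_DUMP, and the remaining CPUs
 * have their save area copied out with memcpy_real().
 */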
static void __init smp_get_save_area(int cpu, u16 address)
        void *lc = pcpu_devices[0].lowcore;

        if (is_kdump_kernel())

        if (!OLDMEM_BASE && (address == boot_cpu_address ||

                pr_warning("CPU %i exceeds the maximum %i and is excluded "
                           "from the dump\n", cpu, NR_CPUS - 1);

                panic("could not allocate memory for save area\n");
        zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
        if (address == boot_cpu_address) {

        memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));

int smp_store_status(int cpu)

        pcpu = pcpu_devices + cpu;

static inline void smp_get_save_area(int cpu, u16 address) { }

        pcpu_devices[cpu].polarization = val;

        return pcpu_devices[cpu].polarization;

        static int use_sigp_detection;

                use_sigp_detection = 1;

static int __devinit smp_add_present_cpu(int cpu);
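
/*
 * __smp_rescan_cpus() walks the CPU info list, skips addresses that are
 * already present, assigns each new CPU the next free bit that is
 * possible but not yet present, and optionally registers it in sysfs
 * via smp_add_present_cpu().
 */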
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        cpu = cpumask_first(&avail);
        for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {

                if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))

                pcpu = pcpu_devices + cpu;

                if (sysfs_add && smp_add_present_cpu(cpu) != 0)

                cpu = cpumask_next(cpu, &avail);

static void __init smp_detect_cpus(void)
        unsigned int cpu, c_cpus, s_cpus;

        info = smp_get_cpu_info();

                panic("smp_detect_cpus failed to allocate memory\n");

                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->cpu[cpu].address != boot_cpu_address)

                        boot_cpu_type = info->cpu[cpu].type;

        for (cpu = 0; cpu < info->combined; cpu++) {

                        smp_get_save_area(c_cpus, info->cpu[cpu].address);

        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        __smp_rescan_cpus(info, 0);
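
/*
 * Secondary CPU bring-up: smp_start_secondary() is the entry function a
 * newly started CPU executes (restoring, among other things, the access
 * registers saved in the lowcore), while the bring-up path below
 * allocates the lowcore, prepares the pcpu, attaches the idle task and
 * finally starts the CPU with smp_start_secondary as its entry point.
 */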
static void __cpuinit smp_start_secondary(void *cpuvoid)

        restore_access_regs(S390_lowcore.access_regs_save_area);

        pcpu = pcpu_devices + cpu;

        rc = pcpu_alloc_lowcore(pcpu, cpu);

        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
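
/*
 * Handler for the "possible_cpus=" kernel parameter: the loop below
 * iterates over CPUs 1..max-1 (capped at nr_cpu_ids), presumably
 * marking each of them as possible in the elided loop body.
 */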
static int __init setup_possible_cpus(char *s)

        for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)

#ifdef CONFIG_HOTPLUG_CPU
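
/*
 * CPU offline path: before a CPU is taken down, its control registers
 * are masked so it no longer takes external interrupts (cr0), I/O
 * interrupts (cr6) or most machine checks (cr14); the following lines
 * then wait for the dying CPU to reach the stopped state and free its
 * lowcore.
 */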
        unsigned long cregs[16];

        cregs[0] &= ~0x0000ee70UL;
        cregs[6] &= ~0xff000000UL;
        cregs[14] &= ~0x1f000000UL;

        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))

        pcpu_free_lowcore(pcpu);

                panic("Couldn't request external interrupt 0x1201");

                panic("Couldn't request external interrupt 0x1202");

        struct pcpu *pcpu = pcpu_devices;

        boot_cpu_address = stap();
        pcpu->address = boot_cpu_address;

#ifdef CONFIG_HOTPLUG_CPU
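
/*
 * Sysfs "configure" attribute: reading it reports the configuration
 * state of the CPU; writes are parsed with sscanf() and only the
 * values 0 and 1 are accepted, presumably to deconfigure or configure
 * the CPU in the elided switch below.
 */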
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);

        if (sscanf(buf, "%d %c", &val, &delim) != 1)

        if (val != 0 && val != 1)

        pcpu = pcpu_devices + cpu;

        return rc ? rc : count;

static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
        &dev_attr_configure.attr,
        &dev_attr_address.attr,

        .attrs = cpu_common_attrs,
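
/*
 * Idle accounting attributes: the do { } while retry loops re-read the
 * per-CPU idle data until the sequence counter is even and unchanged,
 * i.e. no update was in progress.  idle_time is kept in TOD clock
 * units, so shifting right by 12 converts it to microseconds for
 * idle_time_us.
 */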
        } while ((sequence & 1) || (idle->sequence != sequence));
        return sprintf(buf, "%llu\n", idle_count);

        unsigned long long now, idle_time, idle_enter, idle_exit;

        } while ((sequence & 1) || (idle->sequence != sequence));
        idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
        return sprintf(buf, "%llu\n", idle_time >> 12);

static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,

        .attrs = cpu_online_attrs,
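
/*
 * CPU hotplug notifier fragment: the notifier presumably creates and
 * removes the cpu_online_attrs group defined above as CPUs come online
 * and go offline; any error is converted with notifier_from_errno().
 */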
                                    unsigned long action, void *hcpu)
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &pcpu_devices[cpu].cpu;

        return notifier_from_errno(err);
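
/*
 * smp_add_present_cpu() registers the per-CPU device and its sysfs
 * attribute groups so that a newly detected CPU shows up under
 * /sys/devices/system/cpu/.
 */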
static int __devinit smp_add_present_cpu(int cpu)
        struct cpu *c = &pcpu_devices[cpu].cpu;

#ifdef CONFIG_HOTPLUG_CPU

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)

        info = smp_get_cpu_info();

        nr = __smp_rescan_cpus(info, 1);

        rc = smp_rescan_cpus();
        return rc ? rc : count;

static int __init s390_smp_init(void)

#ifdef CONFIG_HOTPLUG_CPU

                rc = smp_add_present_cpu(cpu);