21 #include <linux/kernel.h>
24 #include <linux/sched.h>
29 #include <asm/machdep.h>
30 #include <asm/vdso_datapage.h>
/*
 * Pre-built RTAS argument block for the "stop-self" call.  The token is
 * initialized to RTAS_UNKNOWN_SERVICE here and resolved at boot by
 * pseries_cpu_hotplug_init() via rtas_token("stop-self").
 * NOTE(review): the extract appears to drop the original lines between
 * .token and .rets (presumably .nargs/.nret) -- confirm against the
 * full file before editing.
 */
37 static struct rtas_args rtas_stop_self_args = {
38 .token = RTAS_UNKNOWN_SERVICE,
/* Return values land in the same args[] array as the (zero) inputs. */
41 .rets = &rtas_stop_self_args.args[0],
/*
 * Parse the "cede_offline=" boot parameter.
 * "on" sets cede_offline_enabled = 1; the visible "else if" implies the
 * preceding (missing) branch handles "off" by setting it to 0.
 * NOTE(review): lines 56-57 and 61-65 of the original (the "off" test
 * and the return) are not visible in this extract.
 */
55 static int __init setup_cede_offline(
char *
str)
58 cede_offline_enabled = 0;
59 else if (!
strcmp(str,
"on"))
60 cede_offline_enabled = 1;
/* Register the early boot-parameter handler. */
66 __setup(
"cede_offline=", setup_cede_offline);
/*
 * Bodies of three per-cpu offline-state accessors; their signatures
 * (original lines ~68-69, ~78-79, ~88-89) are missing from this
 * extract.  Presumably: get_cpu_current_state(), 
 * get_preferred_offline_state(), and set_default_offline_state()
 * -- TODO confirm names against the full file.
 */
70 return per_cpu(current_state, cpu);
80 return per_cpu(preferred_offline_state, cpu);
90 per_cpu(preferred_offline_state, cpu) = default_offline_state;
/*
 * Ask firmware to stop the calling CPU via the RTAS "stop-self" call.
 * This call must not return; if it does, the CPU was not taken offline
 * and we panic.
 */
93 static void rtas_stop_self(
void)
95 struct rtas_args *
args = &rtas_stop_self_args;
/* Token must have been resolved by pseries_cpu_hotplug_init(). */
99 BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
101 printk(
"cpu %u (hwid %u) Ready to die...\n",
/* enter_rtas() takes the physical address of the argument block. */
103 enter_rtas(
__pa(args));
/* stop-self should never return. */
105 panic(
"Alas, I survived.\n");
/*
 * Platform cpu_die hook: take the current CPU offline.  Two visible
 * paths: (a) cede the processor to the hypervisor with a latency hint
 * (resuming later via start_secondary_resume()), or (b) unregister the
 * SLB shadow and stop permanently (rtas_stop_self on the missing
 * lines, presumably).  NOTE(review): many original lines (109-119,
 * 122, 124, 128-132, 136-152) are absent from this extract; the branch
 * structure selecting between paths cannot be confirmed from here.
 */
108 static void pseries_mach_cpu_die(
void)
112 u8 cede_latency_hint = 0;
120 if (
ppc_md.suspend_disable_cpu)
121 ppc_md.suspend_disable_cpu();
/* Hint 2 is used on the (missing) conditional path above -- the
 * default hint remains 0. */
123 cede_latency_hint = 2;
/* Tell the hypervisor (via the lppaca) that this CPU is idle; on a
 * dedicated (non-shared) processor also donate its cycles. */
125 get_lppaca()->idle = 1;
126 if (!get_lppaca()->shared_proc)
127 get_lppaca()->donate_dedicated_cpu = 1;
/* Cede to the hypervisor; returns when the CPU is prodded awake. */
130 extended_cede_processor(cede_latency_hint);
/* Undo the idle/donate markings after waking. */
133 if (!get_lppaca()->shared_proc)
134 get_lppaca()->donate_dedicated_cpu = 0;
135 get_lppaca()->idle = 0;
138 unregister_slb_shadow(hwcpu);
/* Re-enter the secondary-CPU startup path after being onlined again. */
145 start_secondary_resume();
/* Permanent-offline path: drop the SLB shadow registration before
 * stopping (rtas_stop_self presumably follows on a missing line). */
153 unregister_slb_shadow(hwcpu);
/*
 * Disable the current CPU in preparation for offlining.
 * NOTE(review): the entire body (original lines 162-187) is missing
 * from this extract; nothing beyond the signature can be documented.
 */
161 static int pseries_cpu_disable(
void)
/*
 * Wait for a dying CPU to actually reach its dead state, polling with
 * bounded retry loops (5000 and 25 iterations on the two visible
 * paths -- the loop bodies and the path selection are on missing
 * lines).  If the CPU still reports a nonzero status afterwards, log
 * a diagnostic rather than hanging forever.
 */
189 static void pseries_cpu_die(
unsigned int cpu)
/* Map logical cpu number to the hardware (physical) cpu id. */
193 unsigned int pcpu = get_hard_smp_processor_id(cpu);
197 for (tries = 0; tries < 5000; tries++) {
206 for (tries = 0; tries < 25; tries++) {
/* Presumably cpu_status comes from the "query-cpu-stopped-state"
 * RTAS call in the missing loop bodies -- confirm in full file. */
215 if (cpu_status != 0) {
216 printk(
"Querying DEAD? cpu %i (%i) shows %i\n",
217 cpu, pcpu, cpu_status);
/*
 * Claim logical CPU ids for the hardware threads of a newly added
 * processor device node.  Computes the thread count from the node's
 * interrupt-server list, finds a run of `nthreads` consecutive ids
 * that are possible but not yet present, and binds them to the
 * hardware ids.  Runs under cpu_maps_update_begin/done.
 * NOTE(review): numerous original lines (236-245, 247-248, 252, 254,
 * 256-257, 260-263, 266-268, 271-273, 275, 277, 279-285, 287-289) are
 * missing from this extract.
 */
235 static int pseries_add_processor(
struct device_node *np)
246 zalloc_cpumask_var(&candidate_mask,
GFP_KERNEL);
/* One u32 interrupt-server entry per hardware thread. */
249 nthreads = len /
sizeof(
u32);
/* Seed tmp with the low nthreads bits; it is shifted along below to
 * scan for a free aligned window of logical cpus. */
250 for (i = 0; i < nthreads; i++)
251 cpumask_set_cpu(i, tmp);
253 cpu_maps_update_begin();
/* Sanity: everything present must also be possible. */
255 BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Candidates = possible but not currently present. */
258 cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
259 if (cpumask_empty(candidate_mask)) {
264 " supports %d logical cpus.\n", np->
full_name,
265 cpumask_weight(cpu_possible_mask));
/* Slide the nthreads-wide window until it fits in the candidates. */
269 while (!cpumask_empty(tmp))
270 if (cpumask_subset(tmp, candidate_mask))
274 cpumask_shift_left(tmp, tmp, nthreads);
276 if (cpumask_empty(tmp)) {
278 " processor %s with %d thread(s)\n", np->
name,
/* Bind each claimed logical cpu to its hardware interrupt server. */
286 set_hard_smp_processor_id(cpu, *intserv++);
290 cpu_maps_update_done();
291 free_cpumask_var(candidate_mask);
292 free_cpumask_var(tmp);
/*
 * Release the logical CPU ids backing a removed processor device node:
 * for each hardware thread in the node's interrupt-server list, find
 * the matching logical cpu and unbind it (hard id set to -1).  Runs
 * under cpu_maps_update_begin/done.
 * NOTE(review): original lines 302-303, 305-310, 312, 315, 317-319,
 * 321-322, 324, 326 are missing from this extract.
 */
301 static void pseries_remove_processor(
struct device_node *np)
304 int len, nthreads,
i;
/* One u32 interrupt-server entry per hardware thread. */
311 nthreads = len /
sizeof(
u32);
313 cpu_maps_update_begin();
314 for (i = 0; i < nthreads; i++) {
/* Skip logical cpus whose hard id doesn't match this thread
 * (the enclosing cpu-iteration loop is on a missing line). */
316 if (get_hard_smp_processor_id(cpu) != intserv[i])
/* Unbind: -1 marks the logical cpu as having no hardware id. */
320 set_hard_smp_processor_id(cpu, -1);
/* No logical cpu claimed this hardware thread -- warn. */
323 if (cpu >= nr_cpu_ids)
325 "with physical id 0x%x\n", intserv[i]);
327 cpu_maps_update_done();
/*
 * Device-tree reconfiguration notifier fragment: dispatch node
 * add/remove events to the processor add/remove handlers above.
 * (The enclosing function signature and switch statement are on
 * missing lines of this extract.)
 */
336 case PSERIES_RECONFIG_ADD:
337 err = pseries_add_processor(node);
339 case PSERIES_RECONFIG_REMOVE:
340 pseries_remove_processor(node);
343 return notifier_from_errno(err);
/* Notifier block registered (presumably at init) for the above. */
347 .notifier_call = pseries_smp_notifier,
/*
 * Sizing for the firmware cede-latency parameter buffer: up to 4
 * latency levels, 10 characters of parameter data each.
 * CEDE_LATENCY_TOKEN is the ibm,get-system-parameter token used by
 * parse_cede_parameters() below.
 */
350 #define MAX_CEDE_LATENCY_LEVELS 4
351 #define CEDE_LATENCY_PARAM_LENGTH 10
352 #define CEDE_LATENCY_PARAM_MAX_LENGTH \
353 (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
354 #define CEDE_LATENCY_TOKEN 45
/*
 * Query firmware for the cede latency settings, filling the
 * cede_parameters buffer (the RTAS call itself is on missing lines;
 * only the physical-address argument is visible).  Returns 0 on
 * success -- presumably the RTAS call's return value; confirm in the
 * full file.
 */
358 static int parse_cede_parameters(
void)
364 __pa(cede_parameters),
/*
 * Boot-time setup for pseries CPU hotplug:
 *  - bail out on MPIC ("open-pic") systems, which are not supported;
 *  - resolve the RTAS tokens for "stop-self" and
 *    "query-cpu-stopped-state" (both required);
 *  - install pseries_mach_cpu_die as the platform cpu_die hook;
 *  - on LPAR, optionally enable cede-based offlining when the boot
 *    parameter allows it and the cede parameters parse cleanly.
 * NOTE(review): several original lines (369-374, 376, 378-380,
 * 382-385, 388, 391-395, 397-400, 402, 405-408, onward) are missing
 * from this extract.
 */
368 static int __init pseries_cpu_hotplug_init(
void)
375 for_each_node_by_name(np,
"interrupt-controller") {
/* MPIC-based systems don't support this hotplug path. */
377 if (
strstr(typep,
"open-pic")) {
381 "systems using MPIC\n");
/* Resolve the RTAS service tokens needed for offlining. */
386 rtas_stop_self_args.token =
rtas_token(
"stop-self");
387 qcss_tok =
rtas_token(
"query-cpu-stopped-state");
/* Both services must exist or hotplug cannot work. */
389 if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
390 qcss_tok == RTAS_UNKNOWN_SERVICE) {
396 ppc_md.cpu_die = pseries_mach_cpu_die;
401 if (firmware_has_feature(FW_FEATURE_LPAR)) {
403 cpu_maps_update_begin();
/* Cede-based offline requires both the boot parameter and valid
 * firmware cede parameters. */
404 if (cede_offline_enabled && parse_cede_parameters() == 0) {
409 cpu_maps_update_done();