#include <linux/export.h>
#include <linux/sched.h>
static bool stop_machine_initialized = false;
	memset(done, 0, sizeof(*done));
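/*
 * Signal completion unless @done is NULL: drop nr_todo and complete
 * the waiter once it reaches zero; @executed records whether any
 * callback actually ran.
 */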
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
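/*
 * Queue @work to @stopper under stopper->lock.  If the stopper is
 * disabled (its CPU is offline), the work is failed on the spot with
 * executed = false, which is the cpu_stop_signal_done() call below.
 */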
static void cpu_stop_queue_work(struct cpu_stopper *stopper,
				struct cpu_stop_work *work)
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
	cpu_stop_init_done(&done, 1);
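/*
 * Hedged usage sketch (illustration, not part of this file): the
 * cpu_stop_init_done(&done, 1) above sits in stop_one_cpu(), which runs
 * one callback on one CPU and waits for it.  The callback and caller
 * below are hypothetical.
 */
#if 0	/* illustration only */
static int drain_local_state(void *arg)
{
	/* runs on the target CPU in stopper context, preemption off */
	return 0;
}

static int example_drain(unsigned int cpu)
{
	/* returns the callback's result, or -ENOENT if @cpu was offline */
	return stop_one_cpu(cpu, drain_local_state, NULL);
}
#endif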
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	ret = __stop_cpus(cpumask, fn, arg);	/* in stop_cpus() */

	ret = __stop_cpus(cpumask, fn, arg);	/* in try_stop_cpus() */
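/*
 * Both callers above are thin wrappers: stop_cpus() sleeps on
 * stop_cpus_mutex around __stop_cpus(), while try_stop_cpus() uses
 * mutex_trylock() and returns -EAGAIN instead of waiting.
 */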
static int cpu_stopper_thread(void *data)
{
	struct cpu_stopper *stopper = data;
	struct cpu_stop_work *work;
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);
	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
288 "cpu_stop: %s(%p) leaked preempt count\n",
		cpu_stop_signal_done(done, true);
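/*
 * One pass of the stopper thread: dequeue a single work under the lock,
 * run its callback with preemption disabled, warn if the callback
 * leaked a preempt count, then signal the waiter with executed = true.
 */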
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p;
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		BUG_ON(stopper->thread || stopper->enabled ||
		       !list_empty(&stopper->works));
		p = kthread_create_on_node(cpu_stopper_thread, stopper,
					   cpu_to_node(cpu),
					   "migration/%d", cpu);
		if (IS_ERR(p))
			return notifier_from_errno(PTR_ERR(p));
	case CPU_ONLINE:
		/* mark enabled */
		spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
		spin_unlock_irq(&stopper->lock);
		break;
#ifdef CONFIG_HOTPLUG_CPU
		/* drain remaining works, failing each waiter */
		spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
		spin_unlock_irq(&stopper->lock);
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
		break;
#endif
	}

	return NOTIFY_OK;
}
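/*
 * Hotplug teardown above: every still-queued work is failed with
 * executed = false, the stopper is marked disabled so new works are
 * rejected, and the task reference taken at creation time is dropped.
 */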
static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
	.notifier_call	= cpu_stop_cpu_callback,
};
static int __init cpu_stop_init(void)
{
	void *bcpu = (void *)(long)smp_processor_id();
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}
	/* start one for the boot cpu */
	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
				    bcpu);
	BUG_ON(err != NOTIFY_OK);
	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
	register_cpu_notifier(&cpu_stop_cpu_notifier);

	stop_machine_initialized = true;

	return 0;
}
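/*
 * cpu_stop_init() runs as an early initcall: it initializes every
 * possible CPU's stopper, brings the boot CPU's stopper up by calling
 * the hotplug callback by hand, and only then flips
 * stop_machine_initialized so __stop_machine() stops using its
 * early-boot fallback.
 */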
#ifdef CONFIG_STOP_MACHINE
enum stopmachine_state {
	STOPMACHINE_NONE,		/* dummy starting state */
	STOPMACHINE_PREPARE,		/* awaiting all threads */
	STOPMACHINE_DISABLE_IRQ,	/* disable interrupts */
	STOPMACHINE_RUN,		/* run the function */
	STOPMACHINE_EXIT,		/* exit */
};

struct stop_machine_data {
	int			(*fn)(void *);
	void			*data;
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum stopmachine_state	state;
	atomic_t		thread_ack;
};
static void set_state(struct stop_machine_data *smdata,
		      enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&smdata->thread_ack, smdata->num_threads);
	smp_wmb();	/* order the counter reset before the new state */
	smdata->state = newstate;
}
/* Last one to ack a state moves to the next state. */
static void ack_state(struct stop_machine_data *smdata)
{
	if (atomic_dec_and_test(&smdata->thread_ack))
		set_state(smdata, smdata->state + 1);
}
static int stop_machine_cpu_stop(void *data)
{
	struct stop_machine_data *smdata = data;
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	int cpu = smp_processor_id(), err = 0;
	bool is_active;

	if (!smdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
	do {
		cpu_relax();	/* ensure smdata->state is re-read */
		if (smdata->state != curstate) {
			curstate = smdata->state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				if (is_active)
					err = smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state(smdata);
		}
	} while (curstate != STOPMACHINE_EXIT);
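/*
 * All CPUs step through PREPARE -> DISABLE_IRQ -> RUN -> EXIT in
 * lockstep: ack_state() counts every thread, and only the last CPU to
 * ack advances smdata->state, so no CPU can reach RUN before all of
 * them have interrupts disabled.
 */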
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .num_threads = num_online_cpus(),
					    .active_cpus = cpus };
	if (!stop_machine_initialized) {
		/*
		 * Early boot, before the stopper threads exist: the body
		 * elided here runs fn() inline with irqs disabled and
		 * returns its result.
		 */
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&smdata, STOPMACHINE_PREPARE);
	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
}
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
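/*
 * Hedged usage sketch (illustration, not part of this file): a typical
 * stop_machine() caller performs a short "whole machine stopped"
 * update.  The variable and functions below are hypothetical.
 */
#if 0	/* illustration only */
static unsigned long boot_time_setting;

static int apply_setting(void *arg)
{
	/* every other online CPU spins with irqs off while this runs */
	boot_time_setting = (unsigned long)arg;
	return 0;
}

static int example_apply(void)
{
	/* NULL active_cpus: the first online CPU executes the callback */
	return stop_machine(apply_setting, (void *)1UL, NULL);
}
#endif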
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;
	set_state(&smdata, STOPMACHINE_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
			     &done);
	ret = stop_machine_cpu_stop(&smdata);

	/* the caller cannot sleep, so the elided lines busy-wait for
	 * done.completion before returning */
	return ret ?: done.ret;
}
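/*
 * Unlike stop_machine(), the path above queues the stop works on the
 * active CPUs but runs stop_machine_cpu_stop() inline on the local,
 * not-yet-active CPU, which therefore has to be counted as an extra
 * participant in smdata.num_threads.
 */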