Linux Kernel  3.7.1
kthread.c
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;

        struct list_head list;
};

39 
40 struct kthread {
41  unsigned long flags;
42  unsigned int cpu;
43  void *data;
46 };
47 
53 };
54 
55 #define to_kthread(tsk) \
56  container_of((tsk)->vfork_done, struct kthread, exited)
57 
/* True once kthread_stop() has been called on the current thread. */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/* True once kthread_park() has been called on the current thread. */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/*
 * Freezer-aware variant of kthread_should_stop(): enters the
 * refrigerator if a freeze is pending, reports via @was_frozen whether
 * the thread was frozen, then returns whether it should stop.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

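/*
 * Illustrative sketch (not part of kthread.c): a minimal freezable
 * kthread main loop built on kthread_freezable_should_stop().  The
 * function name example_freezable_fn and the helper example_do_work()
 * are hypothetical; set_freezable() comes from <linux/freezer.h> and
 * schedule_timeout_interruptible() from <linux/sched.h>, which a real
 * caller would include in its own source file.
 */
static int example_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();                /* opt in to the freezer */
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        pr_info("example: resumed after system freeze\n");
                example_do_work(data);                  /* hypothetical */
                schedule_timeout_interruptible(HZ);     /* nap ~1 second */
        }
        /* the return value is handed back to kthread_stop() */
        return 0;
}
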
/* Return the data pointer specified when kthread @task was created. */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_INTERRUPTIBLE);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
        init_completion(&self.parked);
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(&create->done);
        schedule();

        ret = -EINTR;

        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
                __kthread_parkme(&self);
                ret = threadfn(data);
        }
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}

/*
 * Create a kernel thread on a given NUMA node.  The new thread is
 * created sleeping in TASK_UNINTERRUPTIBLE; the caller must wake it
 * with wake_up_process() (or use the kthread_run() helper).  Returns
 * the task_struct on success or an ERR_PTR() on failure.
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct kthread_create_info create;

        create.threadfn = threadfn;
        create.data = data;
        create.node = node;
        init_completion(&create.done);

        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

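/*
 * Illustrative sketch (not part of kthread.c): typical caller-side use
 * of kthread_create_on_node().  kthread_create() is a wrapper macro in
 * <linux/kthread.h> that passes -1 ("any node"); kthread_run() creates
 * and wakes in one step.  struct example_dev, example_thread_fn and
 * the "example/%d" name are hypothetical.
 */
static int example_start_thread(struct example_dev *dev)
{
        struct task_struct *task;

        /* created sleeping in TASK_UNINTERRUPTIBLE; nothing runs yet */
        task = kthread_create(example_thread_fn, dev, "example/%d", dev->id);
        if (IS_ERR(task))
                return PTR_ERR(task);

        dev->task = task;
        wake_up_process(task);          /* now the thread actually starts */
        return 0;
}

static void example_stop_thread(struct example_dev *dev)
{
        /* wakes the thread, makes kthread_should_stop() return true,
         * and waits for the thread function to return */
        kthread_stop(dev->task);
}
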
static void __kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
}

/*
 * Bind a just-created, not-yet-woken kthread to @cpu and mark it
 * PF_THREAD_BOUND so its CPU affinity cannot be changed afterwards.
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }
        __kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);

/*
 * Create a kthread for a specific CPU: the thread is created on the
 * CPU's NUMA node, flagged KTHREAD_IS_PER_CPU and left parked, so that
 * kthread_unpark() later binds it to @cpu and starts it.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
        kthread_park(p);
        return p;
}

static struct kthread *task_get_live_kthread(struct task_struct *k)
{
        struct kthread *kthread;

        get_task_struct(k);
        kthread = to_kthread(k);
        /* It might have exited */
        barrier();
        if (k->vfork_done != NULL)
                return kthread;
        return NULL;
}

/*
 * Unpark a thread parked by kthread_park(): clear KTHREAD_SHOULD_PARK,
 * rebind per-CPU threads to their CPU and wake the thread up again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);

        if (kthread) {
                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                /*
                 * We clear the IS_PARKED bit here as we don't wait
                 * until the task has left the park code. So if we'd
                 * park before that happens we'd see the IS_PARKED bit
                 * which might be about to be cleared.
                 */
                if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                                __kthread_bind(k, kthread->cpu);
                        wake_up_process(k);
                }
        }
        put_task_struct(k);
}

/*
 * Ask a kthread to park: set KTHREAD_SHOULD_PARK and, when called for
 * another task, wait until the thread has entered __kthread_parkme().
 * Returns 0 on success or -ENOSYS if the thread has already exited.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);
        int ret = -ENOSYS;

        if (kthread) {
                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                        if (k != current) {
                                wake_up_process(k);
                                wait_for_completion(&kthread->parked);
                        }
                }
                ret = 0;
        }
        put_task_struct(k);
        return ret;
}

/*
 * Stop a kthread: set KTHREAD_SHOULD_STOP, wake the thread and wait
 * for it to exit, then return the value its threadfn returned (or
 * -EINTR if the thread never got to run its threadfn).
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);
        int ret;

        trace_sched_kthread_stop(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;

        put_task_struct(k);
        trace_sched_kthread_stop_ret(ret);

        return ret;
}
EXPORT_SYMBOL(kthread_stop);

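/*
 * Illustrative sketch (not part of kthread.c): parking a per-CPU
 * thread across CPU hotplug, mirroring the pattern kernel/smpboot.c
 * uses with kthread_create_on_cpu()/kthread_park()/kthread_unpark().
 * example_cpu_fn, example_do_percpu_work() and per_cpu_task[] are
 * hypothetical.
 */
static int example_cpu_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        /* sleeps here until kthread_unpark() */
                        kthread_parkme();
                        continue;
                }
                example_do_percpu_work(data);           /* hypothetical */
                schedule_timeout_interruptible(HZ / 10);
        }
        return 0;
}

/* CPU going offline: keep the thread around, but idle and parked */
static void example_cpu_down_prep(unsigned int cpu)
{
        kthread_park(per_cpu_task[cpu]);        /* per_cpu_task[] hypothetical */
}

/* CPU back online: kthread_unpark() rebinds KTHREAD_IS_PER_CPU threads
 * to their CPU and wakes them up */
static void example_cpu_online(unsigned int cpu)
{
        kthread_unpark(per_cpu_task[cpu]);
}
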
int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_HIGH_MEMORY]);

        current->flags |= PF_NOFREEZE;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/*
 * Thread function for a kthread_worker: process items queued with
 * queue_kthread_work() one at a time until kthread_stop() is called.
 * Create the thread with kthread_run(kthread_worker_fn, &worker, ...).
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

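/*
 * Illustrative sketch (not part of kthread.c): bringing up a dedicated
 * kthread_worker.  init_kthread_worker() and kthread_run() are helpers
 * from <linux/kthread.h>; the example_worker* names are hypothetical.
 */
static struct kthread_worker example_worker;
static struct task_struct *example_worker_task;

static int example_worker_start(void)
{
        init_kthread_worker(&example_worker);
        /* the new thread simply runs kthread_worker_fn() on our worker */
        example_worker_task = kthread_run(kthread_worker_fn, &example_worker,
                                          "example_worker");
        if (IS_ERR(example_worker_task))
                return PTR_ERR(example_worker_task);
        return 0;
}
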
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        lockdep_assert_held(&worker->lock);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (likely(worker->task))
                wake_up_process(worker->task);
}

/*
 * Queue @work on @worker.  Returns true if the work was queued, false
 * if it was already pending.  May be called from any context (uses an
 * irq-safe lock).
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                insert_kthread_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/*
 * Wait until @work, if queued or currently executing, has finished.
 * Works by queueing a dummy flush work right behind it and waiting for
 * that flush work to complete.
 */
void flush_kthread_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

retry:
        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        if (work->worker != worker) {
                spin_unlock_irq(&worker->lock);
                goto retry;
        }

        if (!list_empty(&work->node))
                insert_kthread_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                insert_kthread_work(worker, &fwork.work, worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/*
 * Wait until all work queued on @worker up to this point has finished:
 * queue a flush work at the tail and wait for it to run.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
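
/*
 * Illustrative sketch (not part of kthread.c): queueing work on the
 * example_worker from the previous sketch and shutting it down.
 * init_kthread_work() is a helper from <linux/kthread.h>; the
 * example_* names are hypothetical.
 */
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
        pr_info("example: running in the worker thread\n");
}

static void example_queue_and_wait(void)
{
        init_kthread_work(&example_work, example_work_fn);

        /* returns false if example_work is already on a worker's list */
        if (!queue_kthread_work(&example_worker, &example_work))
                pr_warn("example: work was already pending\n");

        /* wait for this specific item to finish executing */
        flush_kthread_work(&example_work);
}

static void example_worker_shutdown(void)
{
        /* drain everything queued so far, then stop the worker thread */
        flush_kthread_worker(&example_worker);
        kthread_stop(example_worker_task);
}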