#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/mempolicy.h>
	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */

	RESCUER_NICE_LEVEL	= -20,
	HIGHPRI_NICE_LEVEL	= -20,
#ifdef CONFIG_SMP
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)		free_cpumask_var((mask))
#else
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
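/*
 * Note: on SMP the mayday mask is a real cpumask; on UP it degenerates to a
 * single bit in an unsigned long, which is why the UP variants above operate
 * on bit 0 only.
 */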
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;

#define CREATE_TRACE_POINTS
#define for_each_worker_pool(pool, gcwq)				\
	for ((pool) = &(gcwq)->pools[0];				\
	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)

	cpu = cpumask_next(cpu, mask);

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
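/*
 * These iterators visit each per-CPU gcwq/cwq and then the special
 * WORK_CPU_UNBOUND slot; the last argument of __next_gcwq_cpu() appears to be
 * a bitmask selecting which of the two classes to include (3 = both).
 */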
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static void *work_debug_hint(void *addr)
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
static inline void debug_work_activate(struct work_struct *work)

static inline void debug_work_deactivate(struct work_struct *work)

void __init_work(struct work_struct *work, int onstack)

void destroy_work_on_stack(struct work_struct *work)

static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
static bool workqueue_freezing;

static int worker_thread(void *__worker);

	return pool - pool->gcwq->pools;

static struct global_cwq *get_gcwq(unsigned int cpu)

	return &unbound_global_cwq;

	int cpu = pool->gcwq->cpu;
	int idx = worker_pool_pri(pool);

	return &unbound_pool_nr_running[idx];

	if (likely(cpu < nr_cpu_ids))

static unsigned int work_color_to_flags(int color)

static int get_work_color(struct work_struct *work)

static int work_next_color(int color)
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)

	atomic_long_set(&work->data, data | flags | work_static(work));

			 unsigned long extra_flags)

	set_work_data(work, (unsigned long)cwq,

static void set_work_cpu_and_clear_pending(struct work_struct *work,
					    unsigned int cpu)
static void clear_work_data(struct work_struct *work)

	unsigned long data = atomic_long_read(&work->data);

	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)

	return get_gcwq(cpu);
static void mark_work_canceling(struct work_struct *work)

	struct global_cwq *gcwq = get_work_gcwq(work);

static bool work_is_canceling(struct work_struct *work)

	unsigned long data = atomic_long_read(&work->data);
static bool __need_more_worker(struct worker_pool *pool)

static bool need_more_worker(struct worker_pool *pool)

	return !list_empty(&pool->worklist) && __need_more_worker(pool);

static bool may_start_working(struct worker_pool *pool)

static bool need_to_create_worker(struct worker_pool *pool)

	return need_more_worker(pool) && !may_start_working(pool);

static bool need_to_manage_workers(struct worker_pool *pool)

	return need_to_create_worker(pool) ||

static bool too_many_workers(struct worker_pool *pool)

	int nr_idle = pool->nr_idle + managing;
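/* need_to_create_worker(): work is pending but no idle worker is available;
 * too_many_workers() is the counterpart used to trim excess idle workers. */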
static void wake_up_worker(struct worker_pool *pool)

	to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)

	    !(worker->flags & WORKER_NOT_RUNNING)) {

			wake_up_worker(pool);

static inline void worker_clr_flags(struct worker *worker, unsigned int flags)

	unsigned int oflags = worker->flags;

	worker->flags &= ~flags;

	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
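/* Setting or clearing WORKER_NOT_RUNNING adjusts the pool's nr_running count,
 * which is what the concurrency-management hooks use to decide when another
 * worker needs to be woken up. */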
	unsigned long v = (unsigned long)work;

static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)

	struct worker *worker;

		if (worker->current_work == work)

	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),

	list_move_tail(&work->entry, head);

static void cwq_activate_delayed_work(struct work_struct *work)

	trace_workqueue_activate_work(work);
	move_linked_works(work, &cwq->pool->worklist, NULL);

	cwq_activate_delayed_work(work);

	cwq_activate_first_delayed(cwq);
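/* cwq_activate_delayed_work() moves a work item that was held back by the
 * cwq's max_active limit from the delayed list onto the pool's worklist. */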
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)

	gcwq = get_work_gcwq(work);

	spin_lock(&gcwq->lock);
	if (!list_empty(&work->entry)) {

		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);

				cwq_activate_delayed_work(work);

			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work));

			spin_unlock(&gcwq->lock);

	spin_unlock(&gcwq->lock);

	if (work_is_canceling(work))
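/* A negative return from try_to_grab_pending() tells the caller to retry:
 * -EAGAIN when PENDING could not be grabbed yet, -ENOENT while someone else
 * is canceling the work (hence the work_is_canceling() check above). */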
			unsigned int extra_flags)

	set_work_cwq(work, cwq, extra_flags);

	if (__need_more_worker(pool))
		wake_up_worker(pool);

	unsigned long flags;
	struct worker *worker;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
	unsigned int work_flags;

	debug_work_activate(work);

		gcwq = get_gcwq(cpu);
		last_gcwq = get_work_gcwq(work);

		if (last_gcwq && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock(&last_gcwq->lock);

			worker = find_worker_executing_work(last_gcwq, work);

				spin_unlock(&last_gcwq->lock);
				spin_lock(&gcwq->lock);

			spin_lock(&gcwq->lock);

		spin_lock(&gcwq->lock);

	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(req_cpu, cwq, work);

		spin_unlock(&gcwq->lock);

	work_flags = work_color_to_flags(cwq->work_color);

		trace_workqueue_activate_work(work);

		worklist = &cwq->pool->worklist;

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock(&gcwq->lock);
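/* __queue_work() enforces non-reentrancy: if the work is still running on the
 * gcwq it last ran on, it is queued there again rather than on the CPU the
 * caller asked for. */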
	unsigned long flags;

		__queue_work(cpu, wq, work);

	__queue_work(dwork->cpu, cwq->wq, &dwork->work);

		     timer->data != (unsigned long)dwork);

		__queue_work(cpu, wq, &dwork->work);

	timer_stats_timer_set_start_info(&dwork->timer);

	struct global_cwq *gcwq = get_work_gcwq(work);

		set_work_cwq(work, get_cwq(lcpu, wq), 0);
	unsigned long flags;

		__queue_delayed_work(cpu, wq, dwork, delay);

	unsigned long flags;

		ret = try_to_grab_pending(&dwork->work, true, &flags);

		__queue_delayed_work(cpu, wq, dwork, delay);

			     unsigned long delay)
static void worker_enter_idle(struct worker *worker)

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))

static void worker_leave_idle(struct worker *worker)

	list_del_init(&worker->entry);
static bool worker_maybe_bind_and_lock(struct worker *worker)

		set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);

		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))

		spin_unlock_irq(&gcwq->lock);
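/* worker_maybe_bind_and_lock() keeps retrying to move the worker onto its
 * gcwq's CPU; it returns with gcwq->lock held whether or not the binding
 * succeeded. */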
static void idle_worker_rebind(struct worker *worker)

	if (worker_maybe_bind_and_lock(worker))

	list_add(&worker->entry, &worker->pool->idle_list);
	spin_unlock_irq(&gcwq->lock);

static void busy_worker_rebind_fn(struct work_struct *work)

	if (worker_maybe_bind_and_lock(worker))

	spin_unlock_irq(&gcwq->lock);
static void rebind_workers(struct global_cwq *gcwq)

	struct worker *worker, *n;

		list_del_init(&worker->entry);

		debug_work_activate(rebind_work);

		if (worker_pool_pri(worker->pool))
			wq = system_highpri_wq;

		insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
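/* rebind_workers() runs when a CPU comes back: idle workers are told to
 * rebind themselves, while each busy worker gets a rebind_work item queued on
 * the matching system workqueue (highpri pools use system_highpri_wq). */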
static struct worker *alloc_worker(void)

	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);

		INIT_LIST_HEAD(&worker->entry);
static struct worker *create_worker(struct worker_pool *pool)

	const char *pri = worker_pool_pri(pool) ? "H" : "";
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);

		spin_unlock_irq(&gcwq->lock);

		spin_lock_irq(&gcwq->lock);

	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();

					"kworker/%u:%d%s", gcwq->cpu, id, pri);

					"kworker/u:%d%s", id, pri);
	if (IS_ERR(worker->task))

	if (worker_pool_pri(pool))

	spin_lock_irq(&gcwq->lock);

	spin_unlock_irq(&gcwq->lock);
static void start_worker(struct worker *worker)

	worker->pool->nr_workers++;
	worker_enter_idle(worker);

static void destroy_worker(struct worker *worker)

	int id = worker->id;

	list_del_init(&worker->entry);

	spin_unlock_irq(&gcwq->lock);

	spin_lock_irq(&gcwq->lock);
static void idle_worker_timeout(unsigned long __pool)

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

			wake_up_worker(pool);

	spin_unlock_irq(&gcwq->lock);

	cpu = cwq->pool->gcwq->cpu;

static void gcwq_mayday_timeout(unsigned long __pool)

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(pool)) {

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
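/* While the pool still needs a new worker, the mayday timer re-arms itself
 * every MAYDAY_INTERVAL so rescuers keep getting summoned. */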
	if (!need_to_create_worker(pool))

	spin_unlock_irq(&gcwq->lock);

		struct worker *worker;

		worker = create_worker(pool);

			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(pool));

		if (!need_to_create_worker(pool))

		if (!need_to_create_worker(pool))

	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(pool))
static bool maybe_destroy_workers(struct worker_pool *pool)

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		destroy_worker(worker);

static bool manage_workers(struct worker *worker)

	spin_unlock_irq(&pool->gcwq->lock);

	if (worker_maybe_bind_and_lock(worker))

	ret |= maybe_destroy_workers(pool);
	ret |= maybe_create_worker(pool);
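/* manage_workers() is run by whichever worker grabs the manager role: it
 * destroys idle workers past their timeout and creates new ones while the
 * pool still needs them, returning true if gcwq->lock was dropped and
 * re-taken along the way. */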
static void process_one_work(struct worker *worker, struct work_struct *work)

	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);

	collision = __find_worker_executing_work(gcwq, bwh, work);

		move_linked_works(work, &collision->scheduled, NULL);

	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);

	work_color = get_work_color(work);

	list_del_init(&work->entry);

		wake_up_worker(pool);

	set_work_cpu_and_clear_pending(work, gcwq->cpu);

	spin_unlock_irq(&gcwq->lock);

	trace_workqueue_execute_start(work);

	trace_workqueue_execute_end(work);

		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
		       "     last function: %pf\n",

	spin_lock_irq(&gcwq->lock);

	hlist_del_init(&worker->hentry);

	cwq_dec_nr_in_flight(cwq, work_color);
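/* If another worker on this gcwq is already executing the same work item, the
 * "collision" path above hands the work to that worker's scheduled list
 * instead of running it concurrently. */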
static void process_scheduled_works(struct worker *worker)

	while (!list_empty(&worker->scheduled)) {

		process_one_work(worker, work);

static int worker_thread(void *__worker)

	struct worker *worker = __worker;

	spin_lock_irq(&gcwq->lock);

		spin_unlock_irq(&gcwq->lock);

		idle_worker_rebind(worker);

	worker_leave_idle(worker);

	if (!need_more_worker(pool))

	if (unlikely(!may_start_working(pool)) && manage_workers(worker))

		process_one_work(worker, work);

		process_scheduled_works(worker);

		move_linked_works(work, &worker->scheduled, NULL);
		process_scheduled_works(worker);
	} while (keep_working(pool));

	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))

	worker_enter_idle(worker);

	spin_unlock_irq(&gcwq->lock);
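/* The worker main loop: leave idle, take the manager role if the pool has no
 * idle worker left, run work items until keep_working() says stop, then go
 * back to idle and sleep. */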
static int rescuer_thread(void *__wq)

	struct worker *rescuer = wq->rescuer;

		rescuer->pool = pool;
		worker_maybe_bind_and_lock(rescuer);

			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		if (keep_working(pool))
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
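/* The rescuer binds to each CPU that sent a mayday, pulls this workqueue's
 * work items onto its own scheduled list and runs them, guaranteeing forward
 * progress for WQ_RESCUER (WQ_MEM_RECLAIM) workqueues under memory pressure. */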
static void wq_barrier_func(struct work_struct *work)

	unsigned int linked = 0;

	init_completion(&barr->done);

		head = target->entry.next;

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
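/* insert_wq_barrier() queues a barrier work right behind the target work (or
 * on the executing worker's scheduled list) and the flusher then waits on
 * barr->done, which wq_barrier_func() completes. */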
				      int flush_color, int work_color)

	if (flush_color >= 0) {

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {

		if (work_color >= 0) {

		spin_unlock_irq(&gcwq->lock);

	next_color = work_next_color(wq->work_color);

		if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,

		flush_workqueue_prep_cwqs(wq, -1, wq->work_color);

		list_del_init(&next->list);

		flush_workqueue_prep_cwqs(wq, -1, wq->work_color);

			list_del_init(&next->list);

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
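/* flush_workqueue_prep_cwqs(): a flush_color >= 0 starts flushing that color
 * on every cwq, a work_color >= 0 advances the color new work items are
 * queued with; flush_workqueue() combines both to step through flush
 * generations. */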
	unsigned int flush_cnt = 0;

	spin_lock(&workqueue_lock);

	spin_unlock(&workqueue_lock);

		spin_lock_irq(&cwq->pool->gcwq->lock);

		spin_unlock_irq(&cwq->pool->gcwq->lock);

		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
				wq->name, flush_cnt);

	spin_lock(&workqueue_lock);

	spin_unlock(&workqueue_lock);
	struct worker *worker = NULL;

	gcwq = get_work_gcwq(work);

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {

		cwq = get_work_cwq(work);

		worker = find_worker_executing_work(gcwq, work);

	insert_wq_barrier(cwq, barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)

	spin_unlock_irq(&gcwq->lock);

	if (start_flush_work(work, &barr)) {

		destroy_work_on_stack(&barr.work);
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)

	unsigned long flags;

		ret = try_to_grab_pending(work, is_dwork, &flags);

	mark_work_canceling(work);

	clear_work_data(work);

	return __cancel_work_timer(work, false);

		__queue_work(dwork->cpu,
			     get_work_cwq(&dwork->work)->wq, &dwork->work);

	unsigned long flags;

		ret = try_to_grab_pending(&dwork->work, true, &flags);

	return __cancel_work_timer(&dwork->work, true);
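/* __cancel_work_timer() loops on try_to_grab_pending() until it owns the
 * work, marks it canceling so concurrent queueing backs off, waits for any
 * in-flight execution and finally clears the work's data. */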
				   unsigned long delay)

		       __alignof__(unsigned long long));

		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)

	if (max_active < 1 || max_active > lim)
		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
			max_active, name, 1, lim);
					       const char *lock_name, ...)

	va_copy(args1, args);
	namelen = vsnprintf(NULL, 0, fmt, args) + 1;

	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);

	max_active = wq_clamp_max_active(max_active, flags, wq->name);

	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)

		struct worker *rescuer;

		wq->rescuer = rescuer = alloc_worker();

		if (IS_ERR(rescuer->task))

	spin_lock(&workqueue_lock);

			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);
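/* __alloc_workqueue_key(): the name is formatted from the varargs, the cwqs
 * are allocated, an optional rescuer is started, and the wq is added to the
 * global workqueues list; freezable wqs start with max_active 0 when a freeze
 * is already in progress. */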
	spin_lock(&workqueue_lock);

	spin_unlock(&workqueue_lock);

		cwq_activate_first_delayed(cwq);

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

		spin_lock_irq(&gcwq->lock);

			cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);

		spin_unlock_irq(&gcwq->lock);

	spin_unlock(&workqueue_lock);
	struct global_cwq *gcwq = get_work_gcwq(work);

	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	if (find_worker_executing_work(gcwq, work))

	spin_unlock_irqrestore(&gcwq->lock, flags);
static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)

	spin_lock_irq(&gcwq->lock);

	spin_unlock_irq(&gcwq->lock);

	struct worker *worker;

	gcwq_claim_assoc_and_lock(gcwq);

		worker->flags |= WORKER_UNBOUND;

	gcwq_release_assoc_and_unlock(gcwq);
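/* The CPU-down unbind path claims the manager role on every pool, flags all
 * workers WORKER_UNBOUND and lets the gcwq keep running disassociated from
 * its CPU until it comes back online. */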
	unsigned int cpu = (unsigned long)hcpu;

		struct worker *worker;

		worker = create_worker(pool);

		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);

		gcwq_claim_assoc_and_lock(gcwq);
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		rebind_workers(gcwq);
		gcwq_release_assoc_and_unlock(gcwq);

					 unsigned long action,

	unsigned int cpu = (unsigned long)hcpu;
struct work_for_cpu {

static void work_for_cpu_fn(struct work_struct *work)

	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);

long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)

	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
#ifdef CONFIG_FREEZER

void freeze_workqueues_begin(void)

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

		spin_lock_irq(&gcwq->lock);

		spin_unlock_irq(&gcwq->lock);

	spin_unlock(&workqueue_lock);
bool freeze_workqueues_busy(void)

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	spin_unlock(&workqueue_lock);

void thaw_workqueues(void)

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)

		spin_lock_irq(&gcwq->lock);

			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);

	workqueue_freezing = false;

	spin_unlock(&workqueue_lock);
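/* Freezing drops freezable cwqs' max_active to 0 so nothing new starts,
 * freeze_workqueues_busy() reports whether frozen work is still in flight,
 * and thaw_workqueues() restores max_active and wakes the pools up again. */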
static int __init init_workqueues(void)

		pool->idle_timer.function = idle_worker_timeout;

			    (unsigned long)pool);

		gcwq->flags &= ~GCWQ_DISASSOCIATED;

			struct worker *worker;

			worker = create_worker(pool);

			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			spin_unlock_irq(&gcwq->lock);

	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq);
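/* Boot-time init: each gcwq's pools get their idle/mayday timers and an
 * initial worker, then the standard system_* workqueues are created and
 * checked with the BUG_ON above. */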