Linux Kernel 3.7.1
workqueue.c File Reference
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include "workqueue_sched.h"
#include <trace/events/workqueue.h>
Data Structures
struct worker
struct worker_pool
struct global_cwq
struct cpu_workqueue_struct
struct wq_flusher
struct workqueue_struct
struct wq_barrier
Macros
#define mayday_test_and_set_cpu(cpu, mask)   test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)   clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)   if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)   true
#define free_mayday_mask(mask)   do { } while (0)
#define CREATE_TRACE_POINTS
#define for_each_worker_pool(pool, gcwq)
#define for_each_busy_worker(worker, i, pos, gcwq)
#define for_each_gcwq_cpu(cpu)
#define for_each_online_gcwq_cpu(cpu)
#define for_each_cwq_cpu(cpu, wq)
Typedefs
typedef unsigned long mayday_mask_t
Enumerations
anonymous enum {
  GCWQ_DISASSOCIATED = 1 << 0, GCWQ_FREEZING = 1 << 1,
  POOL_MANAGE_WORKERS = 1 << 0, POOL_MANAGING_WORKERS = 1 << 1,
  WORKER_STARTED = 1 << 0, WORKER_DIE = 1 << 1, WORKER_IDLE = 1 << 2,
  WORKER_PREP = 1 << 3, WORKER_CPU_INTENSIVE = 1 << 6, WORKER_UNBOUND = 1 << 7,
  WORKER_NOT_RUNNING,
  NR_WORKER_POOLS = 2,
  BUSY_WORKER_HASH_ORDER = 6, BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
  BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
  MAX_IDLE_WORKERS_RATIO = 4, IDLE_WORKER_TIMEOUT = 300 * HZ
}
Variables
struct global_cwq ____cacheline_aligned_in_smp
struct workqueue_struct *system_wq __read_mostly
Macro Definition Documentation
#define alloc_mayday_mask(maskp, gfp)   true
Definition at line 233 of file workqueue.c.
#define CREATE_TRACE_POINTS
Definition at line 280 of file workqueue.c.
#define for_each_busy_worker(worker, i, pos, gcwq)
Definition at line 287 of file workqueue.c.
#define for_each_cwq_cpu(cpu, wq)
Definition at line 335 of file workqueue.c.
#define for_each_gcwq_cpu(cpu)
Definition at line 325 of file workqueue.c.
#define for_each_mayday_cpu(cpu, mask)   if ((cpu) = 0, (mask))
Definition at line 232 of file workqueue.c.
#define for_each_online_gcwq_cpu(cpu)
Definition at line 330 of file workqueue.c.
#define for_each_worker_pool(pool, gcwq)
Definition at line 283 of file workqueue.c.
#define free_mayday_mask(mask)   do { } while (0)
Definition at line 234 of file workqueue.c.
#define mayday_clear_cpu(cpu, mask)   clear_bit(0, &(mask))
Definition at line 231 of file workqueue.c.
#define mayday_test_and_set_cpu(cpu, mask)   test_and_set_bit(0, &(mask))
Definition at line 230 of file workqueue.c.
Typedef Documentation
typedef unsigned long mayday_mask_t
Definition at line 229 of file workqueue.c.
Enumeration Type Documentation
anonymous enum
Definition at line 47 of file workqueue.c.
Function Documentation
struct workqueue_struct * __alloc_workqueue_key (const char *fmt, unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name, ...)
Definition at line 3227 of file workqueue.c.
bool cancel_delayed_work (struct delayed_work *dwork)
cancel_delayed_work - cancel a delayed work
@dwork: delayed_work to cancel
Kill off a pending delayed_work. Returns true if @dwork was pending and canceled; false if it wasn't pending. Note that the work callback function may still be running on return, unless it returns true and @dwork doesn't re-arm itself. Explicitly flush or use cancel_delayed_work_sync() to wait on it.
This function is safe to call from any context including IRQ handler.
Definition at line 2981 of file workqueue.c.
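As a hedged illustration (the foo_* structure and functions below are hypothetical, not part of workqueue.c), a typical user is a one-shot command timeout that the completion interrupt cancels:
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct delayed_work timeout_work;       /* one-shot command timeout */
};

static void foo_cmd_timeout(struct work_struct *work)
{
        /* ... the command did not complete in time, reset the device ... */
}

static void foo_start_cmd(struct foo_dev *foo)
{
        /* initialised here for brevity; real code would do this once at probe time */
        INIT_DELAYED_WORK(&foo->timeout_work, foo_cmd_timeout);
        schedule_delayed_work(&foo->timeout_work, msecs_to_jiffies(500));
        /* ... kick off the command in hardware ... */
}

/* Completion interrupt: the command finished before the timeout fired. */
static void foo_cmd_done(struct foo_dev *foo)
{
        /*
         * Safe from IRQ context.  If the timeout handler is already running
         * this cancel cannot stop it, so foo_cmd_timeout() must tolerate
         * racing with completion (or the caller must use
         * cancel_delayed_work_sync() from sleepable context).
         */
        cancel_delayed_work(&foo->timeout_work);
}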
bool cancel_delayed_work_sync (struct delayed_work *dwork)
cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
@dwork: the delayed work to cancel
This is cancel_work_sync() for delayed works.
RETURNS: true if @dwork was pending, false otherwise.
Definition at line 3008 of file workqueue.c.
bool cancel_work_sync (struct work_struct *work)
cancel_work_sync - cancel a work and wait for it to finish
@work: the work to cancel
Cancel @work and wait for its execution to finish. This function can be used even if the work re-queues itself or migrates to another workqueue. On return from this function, @work is guaranteed to be not pending or executing on any CPU.
cancel_work_sync(&delayed_work->work) must not be used for delayed_work's. Use cancel_delayed_work_sync() instead.
The caller must ensure that the workqueue on which @work was last queued can't be destroyed before this function returns.
RETURNS: true if @work was pending, false otherwise.
Definition at line 2940 of file workqueue.c.
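A minimal sketch of the usual teardown pattern, with hypothetical foo_* names: cancel the work synchronously in the remove path before freeing the object it operates on.
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct work_struct event_work;
};

static void foo_handle_events(struct work_struct *work)
{
        struct foo_dev *foo = container_of(work, struct foo_dev, event_work);

        pr_debug("foo: handling events for %p\n", foo);
        /* ... process events collected by the interrupt handler ... */
}

static void foo_remove(struct foo_dev *foo)
{
        /*
         * After this returns, event_work is neither pending nor running on
         * any CPU, so freeing @foo is safe.  Must not be called while
         * holding a lock that foo_handle_events() also takes.
         */
        cancel_work_sync(&foo->event_work);
        kfree(foo);
}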
void delayed_work_timer_fn (unsigned long __data)
Definition at line 1345 of file workqueue.c.
void destroy_workqueue (struct workqueue_struct *wq)
destroy_workqueue - safely terminate a workqueue
@wq: target workqueue
Safely destroy a workqueue. All work currently pending will be done first.
Definition at line 3340 of file workqueue.c.
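For context, a hypothetical module skeleton pairing alloc_workqueue() at init time with destroy_workqueue() at exit; the queue name and flags are illustrative assumptions, not requirements.
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
        /*
         * WQ_MEM_RECLAIM gives the queue a rescuer thread so it can make
         * forward progress under memory pressure; max_active = 1 keeps the
         * work items serialized.
         */
        foo_wq = alloc_workqueue("foo", WQ_MEM_RECLAIM, 1);
        if (!foo_wq)
                return -ENOMEM;
        return 0;
}

static void __exit foo_exit(void)
{
        /* Pending work items are completed before the workqueue is freed. */
        destroy_workqueue(foo_wq);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");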
void drain_workqueue (struct workqueue_struct *wq)
drain_workqueue - drain a workqueue
@wq: workqueue to drain
Wait until the workqueue becomes empty. While draining is in progress, only chain queueing is allowed. IOW, only currently pending or running work items on @wq can queue further work items on it. @wq is flushed repeatedly until it becomes empty. The number of flushing is determined by the depth of chaining and should be relatively short. Whine if it takes too long.
Definition at line 2778 of file workqueue.c.
early_initcall (init_workqueues)
int execute_in_process_context (work_func_t fn, struct execute_work *ew)
Definition at line 3153 of file workqueue.c.
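execute_in_process_context() runs @fn right away when the caller already has process context and otherwise defers it using the caller-supplied struct execute_work storage. A hedged sketch of deferred cleanup, with made-up foo_* names:
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_cleanup {
        struct execute_work ew;         /* storage execute_in_process_context() may use */
        void *buffer;
};

static void foo_do_cleanup(struct work_struct *work)
{
        struct foo_cleanup *c = container_of(work, struct foo_cleanup, ew.work);

        /* runs in process context, so sleeping operations would be allowed here */
        kfree(c->buffer);
        kfree(c);
}

/* May be called from atomic context; the cleanup either runs inline (if we
 * already have process context) or is deferred to the global workqueue. */
static void foo_cleanup(struct foo_cleanup *c)
{
        execute_in_process_context(foo_do_cleanup, &c->ew);
}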
EXPORT_SYMBOL (flush_delayed_work)
EXPORT_SYMBOL (cancel_delayed_work)
EXPORT_SYMBOL (cancel_delayed_work_sync)
EXPORT_SYMBOL (schedule_work_on)
EXPORT_SYMBOL (schedule_work)
EXPORT_SYMBOL (schedule_delayed_work_on)
EXPORT_SYMBOL (schedule_delayed_work)
EXPORT_SYMBOL (flush_scheduled_work)
EXPORT_SYMBOL_GPL (system_wq)
EXPORT_SYMBOL_GPL (system_highpri_wq)
EXPORT_SYMBOL_GPL (system_long_wq)
EXPORT_SYMBOL_GPL (system_unbound_wq)
EXPORT_SYMBOL_GPL (system_freezable_wq)
EXPORT_SYMBOL_GPL (queue_work_on)
EXPORT_SYMBOL_GPL (queue_work)
EXPORT_SYMBOL_GPL (delayed_work_timer_fn)
EXPORT_SYMBOL_GPL (queue_delayed_work_on)
EXPORT_SYMBOL_GPL (queue_delayed_work)
EXPORT_SYMBOL_GPL (mod_delayed_work_on)
EXPORT_SYMBOL_GPL (mod_delayed_work)
EXPORT_SYMBOL_GPL (flush_workqueue)
EXPORT_SYMBOL_GPL (drain_workqueue)
EXPORT_SYMBOL_GPL (flush_work)
EXPORT_SYMBOL_GPL (cancel_work_sync)
EXPORT_SYMBOL_GPL (execute_in_process_context)
EXPORT_SYMBOL_GPL (__alloc_workqueue_key)
EXPORT_SYMBOL_GPL (destroy_workqueue)
EXPORT_SYMBOL_GPL (workqueue_set_max_active)
EXPORT_SYMBOL_GPL (workqueue_congested)
EXPORT_SYMBOL_GPL (work_cpu)
EXPORT_SYMBOL_GPL (work_busy)
bool flush_delayed_work (struct delayed_work *dwork)
flush_delayed_work - wait for a dwork to finish executing the last queueing
@dwork: the delayed work to flush
Delayed timer is cancelled and the pending work is queued for immediate execution. Like flush_work(), this function only considers the last queueing instance of @dwork.
RETURNS: true if flush_work() waited for the work to finish execution, false if it was already idle.
Definition at line 2958 of file workqueue.c.
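An illustrative sketch (hypothetical foo_* names): force a periodic save-work item to run now and wait for it, for example in a suspend path.
#include <linux/workqueue.h>

struct foo_dev {
        struct delayed_work save_work;  /* periodically writes settings to flash */
};

static int foo_suspend(struct foo_dev *foo)
{
        /*
         * Cancel the remaining delay and run the last queued instance right
         * away; returns once it has finished, so the settings are written
         * out before the device powers down.
         */
        flush_delayed_work(&foo->save_work);
        return 0;
}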
void flush_scheduled_work (void)
flush_scheduled_work - ensure that any scheduled work has run to completion.
Forces execution of the kernel-global workqueue and blocks until its completion.
Think twice before calling this function! It's very easy to get into trouble if you don't take great care. Either of the following situations will lead to deadlock:
One of the work items currently on the workqueue needs to acquire a lock held by your code or its caller.
Your code is running in the context of a work routine.
They will be detected by lockdep when they occur, but the first might not occur very often. It depends on what work items are on the workqueue and what locks they need, which you have no control over.
In most situations flushing the entire workqueue is overkill; you merely need to know that a particular work item isn't queued and isn't running. In such cases you should use cancel_delayed_work_sync() or cancel_work_sync() instead.
Definition at line 3135 of file workqueue.c.
bool flush_work (struct work_struct *work)
flush_work - wait for a work to finish executing the last queueing instance
@work: the work to flush
Wait until @work has finished execution. @work is guaranteed to be idle on return if it hasn't been requeued since flush started.
RETURNS: true if flush_work() waited for the work to finish execution, false if it was already idle.
Definition at line 2881 of file workqueue.c.
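A small sketch with hypothetical names, waiting for an in-flight work item before proceeding:
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct work_struct tx_work;
};

static void foo_wait_for_tx(struct foo_dev *foo)
{
        /*
         * Waits only for the last queueing of tx_work; an instance queued
         * after this call starts is not waited for.
         */
        if (flush_work(&foo->tx_work))
                pr_debug("foo: waited for tx_work to finish\n");
}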
void flush_workqueue (struct workqueue_struct *wq)
flush_workqueue - ensure that any scheduled work has run to completion.
@wq: workqueue to flush
Forces execution of the workqueue and blocks until its completion. This is typically used in driver shutdown handlers.
We sleep until all works which were queued on entry have been handled, but we are not livelocked by new incoming ones.
Definition at line 2621 of file workqueue.c.
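A hedged sketch (foo_wq is a hypothetical queue owned by the caller) of the shutdown-time flush described above:
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static void foo_shutdown(void)
{
        /* ... stop anything that still queues work on foo_wq ... */

        /*
         * Every work item that was already queued on foo_wq has completed
         * by the time this returns; items queued afterwards are not waited
         * for.
         */
        flush_workqueue(foo_wq);
}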
int keventd_up (void)
Definition at line 3167 of file workqueue.c.
bool mod_delayed_work (struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay)
mod_delayed_work - modify delay of or queue a delayed work
@wq: workqueue to use
@dwork: work to queue
@delay: number of jiffies to wait before queueing
mod_delayed_work_on() on local CPU.
Definition at line 1505 of file workqueue.c.
bool mod_delayed_work_on (int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay)
mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@cpu: CPU number to execute work on
@wq: workqueue to use
@dwork: work to queue
@delay: number of jiffies to wait before queueing
If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, modify @dwork's timer so that it expires after @delay. If @delay is zero, @dwork is guaranteed to be scheduled immediately regardless of its current state.
Returns false if @dwork was idle and queued, true if @dwork was pending and its timer was modified.
This function is safe to call from any context including IRQ handler. See try_to_grab_pending() for details.
Definition at line 1477 of file workqueue.c.
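An illustrative use, with hypothetical foo_* names: re-arming a watchdog deadline from a hot path without the unarmed window a cancel-then-queue sequence would have.
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct workqueue_struct *wq;
        struct delayed_work watchdog;
        int cpu;
};

/* Called on every received packet, possibly from the interrupt handler:
 * push the watchdog deadline out by another second. */
static void foo_touch_watchdog(struct foo_dev *foo)
{
        /*
         * Queues the watchdog if it is idle, otherwise just moves its
         * timer; safe from IRQ context.
         */
        mod_delayed_work_on(foo->cpu, foo->wq, &foo->watchdog,
                            msecs_to_jiffies(1000));
}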
bool queue_delayed_work (struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay)
queue_delayed_work - queue work on a workqueue after delay
@wq: workqueue to use
@dwork: delayable work to queue
@delay: number of jiffies to wait before queueing
Equivalent to queue_delayed_work_on() but tries to use the local CPU.
Definition at line 1452 of file workqueue.c.
bool queue_delayed_work_on (int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay)
queue_delayed_work_on - queue work on specific CPU after delay
@cpu: CPU number to execute work on
@wq: workqueue to use
@dwork: work to queue
@delay: number of jiffies to wait before queueing
Returns false if @dwork was already on a queue, true otherwise. If @delay is zero and @dwork is idle, it will be scheduled for immediate execution.
Definition at line 1424 of file workqueue.c.
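A sketch assuming a hypothetical per-CPU statistics structure; the point is only that the work runs on the requested CPU after the delay.
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct foo_stats {
        struct delayed_work refresh;
};

static DEFINE_PER_CPU(struct foo_stats, foo_stats);

/* Refresh @cpu's statistics two seconds from now, pinned to that CPU so the
 * handler reads the counters locally. */
static void foo_schedule_refresh(int cpu)
{
        struct foo_stats *stats = &per_cpu(foo_stats, cpu);

        queue_delayed_work_on(cpu, system_wq, &stats->refresh,
                              msecs_to_jiffies(2000));
}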
bool queue_work (struct workqueue_struct *wq, struct work_struct *work)
queue_work - queue work on a workqueue
@wq: workqueue to use
@work: work to queue
Returns false if @work was already on a queue, true otherwise.
We queue the work to the CPU on which it was submitted, but if the CPU dies it can be processed by another CPU.
Definition at line 1339 of file workqueue.c.
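A minimal sketch of the common INIT_WORK()/queue_work() pattern; foo_wq, struct foo_request and the handler are hypothetical.
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

struct foo_request {
        struct work_struct work;
        int id;
};

static void foo_process(struct work_struct *work)
{
        struct foo_request *req = container_of(work, struct foo_request, work);

        /* ... handle request req->id; may sleep ... */
        kfree(req);
}

static int foo_submit(int id)
{
        struct foo_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return -ENOMEM;
        req->id = id;
        INIT_WORK(&req->work, foo_process);
        queue_work(foo_wq, &req->work);         /* false would mean already queued */
        return 0;
}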
bool queue_work_on (int cpu, struct workqueue_struct *wq, struct work_struct *work)
queue_work_on - queue work on specific cpu
@cpu: CPU number to execute work on
@wq: workqueue to use
@work: work to queue
Returns false if @work was already on a queue, true otherwise.
We queue the work to a specific CPU, the caller must ensure it can't go away.
Definition at line 1311 of file workqueue.c.
bool schedule_delayed_work (struct delayed_work *dwork, unsigned long delay)
schedule_delayed_work - put work task in global workqueue after delay
@dwork: job to be done
@delay: number of jiffies to wait or 0 for immediate execution
After waiting for a given time this puts a job in the kernel-global workqueue.
Definition at line 3068 of file workqueue.c.
bool schedule_delayed_work_on (int cpu, struct delayed_work *dwork, unsigned long delay)
schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@cpu: cpu to use
@dwork: job to be done
@delay: number of jiffies to wait
After waiting for a given time this puts a job in the kernel-global workqueue on the specified CPU.
Definition at line 3053 of file workqueue.c.
int schedule_on_each_cpu (work_func_t func)
schedule_on_each_cpu - execute a function synchronously on each online CPU
@func: the function to call
schedule_on_each_cpu() executes @func on each online CPU using the system workqueue and blocks until all CPUs have completed. schedule_on_each_cpu() is very slow.
RETURNS: 0 on success, -errno on failure.
Definition at line 3085 of file workqueue.c.
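An illustrative sketch (the foo_* helper is hypothetical); the handler runs in sleepable worker context on each CPU in turn, which is what distinguishes this from on_each_cpu():
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Runs once on every online CPU; unlike an on_each_cpu() callback it may sleep. */
static void foo_drain_local_cache(struct work_struct *unused)
{
        pr_info("foo: draining cache on CPU %d\n", smp_processor_id());
}

static int foo_drain_all_caches(void)
{
        /* Blocks until the handler has finished on every online CPU. */
        return schedule_on_each_cpu(foo_drain_local_cache);
}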
bool schedule_work (struct work_struct *work)
schedule_work - put work task in global workqueue
@work: job to be done
Returns false if @work was already on the kernel-global workqueue and true otherwise.
This puts a job in the kernel-global workqueue if it was not already queued and leaves it in the same position on the kernel-global workqueue otherwise.
Definition at line 3038 of file workqueue.c.
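The classic use is deferring the slow half of an interrupt handler to the kernel-global workqueue; a sketch with hypothetical names:
#include <linux/workqueue.h>

static void foo_event_fn(struct work_struct *work)
{
        /* runs later, in process context, on the kernel-global workqueue */
}

static DECLARE_WORK(foo_event_work, foo_event_fn);

/* Typically called from the interrupt handler to defer the slow part. */
static void foo_irq_bottom_half(void)
{
        schedule_work(&foo_event_work); /* no-op if already pending */
}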
bool schedule_work_on (int cpu, struct work_struct *work)
schedule_work_on - put work task on a specific cpu
@cpu: cpu to put the work task on
@work: job to be done
This puts a job on a specific cpu.
Definition at line 3021 of file workqueue.c.
unsigned int work_busy (struct work_struct *work)
work_busy - test whether a work is currently pending or running
@work: the work to be tested
Test whether @work is currently pending or running. There is no synchronization around this function and the test result is unreliable and only useful as advisory hints or for debugging. Especially for reentrant wqs, the pending state might hide the running state.
RETURNS: OR'd bitmask of WORK_BUSY_* bits.
Definition at line 3481 of file workqueue.c.
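Because the result is only advisory, a realistic use is debug output; a hypothetical sketch:
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Debugging aid only: the answer may be stale the moment it is returned. */
static void foo_report_state(struct work_struct *work)
{
        unsigned int busy = work_busy(work);

        pr_debug("work %p:%s%s\n", work,
                 busy & WORK_BUSY_PENDING ? " pending" : "",
                 busy & WORK_BUSY_RUNNING ? " running" : "");
}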
unsigned int work_cpu (struct work_struct *work)
work_cpu - return the last known associated cpu for @work
@work: the work of interest
RETURNS: CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
Definition at line 3460 of file workqueue.c.
bool workqueue_congested (unsigned int cpu, struct workqueue_struct *wq)
workqueue_congested - test whether a workqueue is congested
@cpu: CPU in question
@wq: target workqueue
Test whether @wq's cpu workqueue for @cpu is congested. There is no synchronization around this function and the test result is unreliable and only useful as advisory hints or for debugging.
RETURNS: true if congested, false otherwise.
Definition at line 3445 of file workqueue.c.
void workqueue_set_max_active (struct workqueue_struct *wq, int max_active)
workqueue_set_max_active - adjust max_active of a workqueue
@wq: target workqueue
@max_active: new max_active value.
Set max_active of @wq to @max_active.
CONTEXT: Don't call from IRQ context.
Definition at line 3407 of file workqueue.c.
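A sketch of a runtime tuning hook using a hypothetical foo_wq; the hook name and trigger are assumptions:
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

/* Tuning hook (e.g. driven by a module parameter or sysfs attribute). */
static void foo_set_concurrency(int nr)
{
        /*
         * The value is clamped to the valid range internally; the new limit
         * takes effect as currently running items complete.  Must not be
         * called from IRQ context.
         */
        workqueue_set_max_active(foo_wq, nr);
}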
struct task_struct * wq_worker_sleeping (struct task_struct *task, unsigned int cpu)
wq_worker_sleeping - a worker is going to sleep
@task: task going to sleep
@cpu: CPU in question, must be the current CPU number
This function is called during schedule() when a busy worker is going to sleep. Worker on the same cpu can be woken up by returning pointer to its task.
CONTEXT: spin_lock_irq(rq->lock)
RETURNS: Worker task on @cpu to wake up, NULL if none.
Definition at line 761 of file workqueue.c.
void wq_worker_waking_up (struct task_struct *task, unsigned int cpu)
wq_worker_waking_up - a worker is waking up
@task: task waking up
@cpu: CPU @task is waking up to
This function is called during try_to_wake_up() when a worker is being awoken.
CONTEXT: spin_lock_irq(rq->lock)
Definition at line 738 of file workqueue.c.
Variable Documentation
struct global_cwq ____cacheline_aligned_in_smp
struct workqueue_struct *system_freezable_wq __read_mostly
Definition at line 269 of file workqueue.c.