/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_CWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
	int cpu;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
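
/*
 * Example (illustrative, not part of the original header): the usual
 * pattern for recovering the containing object inside a work function.
 * "struct my_device", "dwork" and "my_timeout_fn" are hypothetical names:
 *
 *	struct my_device {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct my_device *dev = container_of(to_delayed_work(work),
 *						     struct my_device, dwork);
 *		... use dev ...
 *	}
 */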

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {		\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,	\
				     0, (unsigned long)&(n),	\
				     (tflags) | TIMER_IRQSAFE),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

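/*
 * Example (illustrative): compile-time declaration of work items.
 * "my_refresh_fn" and the item names are hypothetical:
 *
 *	static void my_refresh_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(my_refresh_work, my_refresh_fn);
 *	static DECLARE_DELAYED_WORK(my_refresh_dwork, my_refresh_fn);
 *
 * Both items can then be queued directly, e.g. with schedule_work()
 * or schedule_delayed_work() declared further below.
 */
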
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn, \
			      (unsigned long)(_work),		\
			      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)	\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		__setup_timer_on_stack(&(_work)->timer,		\
				       delayed_work_timer_fn,	\
				       (unsigned long)(_work),	\
				       (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)			\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)		\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
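
/*
 * Example (illustrative): runtime initialization for embedded work items,
 * e.g. in a driver probe path.  "struct my_adapter" and the callbacks are
 * hypothetical:
 *
 *	struct my_adapter {
 *		struct work_struct reset_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_WORK(&ad->reset_work, my_reset_fn);
 *	INIT_DELAYED_WORK(&ad->poll_work, my_poll_fn);
 */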

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

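/*
 * Example (illustrative): "ad" is the hypothetical adapter from the
 * INIT_WORK example above.  Note the test is only a hint; the state may
 * change right after it is made:
 *
 *	if (!delayed_work_pending(&ad->poll_work))
 *		queue_delayed_work(system_wq, &ad->poll_work, HZ);
 */
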
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;

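/*
 * Example (illustrative): picking a system workqueue.  Short work can go
 * through schedule_work() (i.e. system_wq); long-running items belong on
 * system_long_wq.  "ad" and its members are hypothetical:
 *
 *	schedule_work(&ad->reset_work);
 *	queue_work(system_long_wq, &ad->slow_work);
 */
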
static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq		__system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);
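
/*
 * Example (illustrative): a plausible lifecycle of a dedicated workqueue;
 * the name and flags are just one reasonable choice:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &ad->reset_work);
 *	...
 *	destroy_workqueue(wq);	(drains remaining work items first)
 */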

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool mod_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

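/*
 * Example (illustrative): queue_delayed_work() is a no-op (returns %false)
 * if the item is already pending, while mod_delayed_work() (re)sets the
 * timer of an already-pending item:
 *
 *	queue_delayed_work(system_wq, &ad->poll_work, HZ);
 *	mod_delayed_work(system_wq, &ad->poll_work, 5 * HZ);
 */
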
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern bool schedule_work_on(int cpu, struct work_struct *work);
extern bool schedule_work(struct work_struct *work);
extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
				     unsigned long delay);
extern bool schedule_delayed_work(struct delayed_work *work,
				  unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

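/*
 * Example (illustrative): typical teardown ordering before freeing the
 * containing object ("ad" is the hypothetical adapter from above):
 *
 *	cancel_delayed_work_sync(&ad->poll_work);
 *	cancel_work_sync(&ad->reset_work);
 *	kfree(ad);
 */
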
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
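
/*
 * Example (illustrative): run a function on a specific CPU and wait for
 * its return value.  "my_read_counter" and MY_ARG are hypothetical:
 *
 *	static long my_read_counter(void *arg)
 *	{
 *		return do_read((unsigned long)arg);
 *	}
 *
 *	ret = work_on_cpu(2, my_read_counter, (void *)MY_ARG);
 *
 * On !CONFIG_SMP builds this simply calls the function directly.
 */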

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif