Linux Kernel 3.7.1
wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H


#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
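
/*
 * Example (illustrative sketch, not part of this header): declaring a
 * file-scope wait queue head and initialising one embedded in a structure.
 * The names my_static_wq, struct my_dev and my_dev_setup() are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);	/* statically initialised head */

struct my_dev {
	wait_queue_head_t wq;	/* head embedded in a driver structure */
	int data_ready;		/* the condition waiters sleep on */
	unsigned long flags;	/* bit flags, see the wait_on_bit sketch below */
};

static inline void my_dev_setup(struct my_dev *dev)
{
	init_waitqueue_head(&dev->wq);	/* run-time init (with lockdep key) */
	dev->data_ready = 0;
	dev->flags = 0;
}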

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
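
/*
 * Example (illustrative sketch, not part of this header): the producer
 * side of a wait/wake pair, assuming the hypothetical struct my_dev above.
 * The condition must be made true before the wakeup so that a woken waiter
 * observes it when it re-evaluates the wait condition.
 */
static void my_dev_data_arrived(struct my_dev *dev)
{
	dev->data_ready = 1;			/* make the condition true */
	wake_up_interruptible(&dev->wq);	/* wake TASK_INTERRUPTIBLE waiters */
}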

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)

#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
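
/*
 * Example (illustrative sketch, not part of this header): the consumer
 * side matching my_dev_data_arrived() above.  wait_event_interruptible()
 * evaluates to 0 once the condition is true and to -ERESTARTSYS if a
 * signal arrived first.  struct my_dev is hypothetical.
 */
static int my_dev_wait_for_data(struct my_dev *dev)
{
	int ret;

	ret = wait_event_interruptible(dev->wq, dev->data_ready != 0);
	if (ret)
		return ret;	/* interrupted by a signal */
	/* dev->data_ready was observed true */
	return 0;
}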

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})
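
/*
 * Example (illustrative sketch, not part of this header): a bounded wait
 * using the hypothetical struct my_dev above.  In this kernel version the
 * macro evaluates to the remaining jiffies (at least 1) if the condition
 * became true in time, 0 if the timeout elapsed first, or -ERESTARTSYS if
 * a signal arrived.
 */
static int my_dev_wait_for_data_timeout(struct my_dev *dev)
{
	long left;

	left = wait_event_interruptible_timeout(dev->wq,
						dev->data_ready != 0,
						msecs_to_jiffies(100));
	if (left < 0)
		return left;		/* -ERESTARTSYS: signal */
	if (left == 0)
		return -ETIMEDOUT;	/* condition still false after 100 ms */
	return 0;			/* condition true, time to spare */
}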

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					TASK_INTERRUPTIBLE); \
		if (condition) { \
			finish_wait(&wq, &__wait); \
			break; \
		} \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		abort_exclusive_wait(&wq, &__wait, \
					TASK_INTERRUPTIBLE, NULL); \
		break; \
	} \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret; \
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})


#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
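
/*
 * Example (illustrative sketch, not part of this header): the *_locked
 * variants must be called with wq.lock already held; here it is taken
 * with spin_lock_irq() to match the _irq variant.  struct my_dev and the
 * idea of consuming the event under wq.lock are hypothetical.
 */
static int my_dev_consume_locked(struct my_dev *dev)
{
	int ret;

	spin_lock_irq(&dev->wq.lock);
	ret = wait_event_interruptible_locked_irq(dev->wq, dev->data_ready != 0);
	if (!ret)
		dev->data_ready = 0;	/* consume the event under wq.lock */
	spin_unlock_irq(&dev->wq.lock);
	return ret;
}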

#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private = current, \
		.func = function, \
		.task_list = LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait = { \
			.private = current, \
			.func = wake_bit_function, \
			.task_list = \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
		(wait)->flags = 0; \
	} while (0)
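
/*
 * Example (illustrative sketch, not part of this header): an open-coded
 * wait loop built from DEFINE_WAIT(), prepare_to_wait() and finish_wait(),
 * equivalent to what the wait_event*() macros above expand to.  struct
 * my_dev is hypothetical.
 */
static void my_dev_wait_open_coded(struct my_dev *dev)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&dev->wq, &wait, TASK_UNINTERRUPTIBLE);
		if (dev->data_ready)
			break;
		schedule();	/* autoremove_wake_function dequeues us on wakeup */
	}
	finish_wait(&dev->wq, &wait);
}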

static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
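
/*
 * Example (illustrative sketch, not part of this header): using a bit in
 * the hypothetical my_dev->flags word as a sleeping lock.  MY_DEV_BUSY and
 * the my_bit_wait() callback are hypothetical; the callback decides how to
 * sleep and returns non-zero to abort the wait early.
 */
#define MY_DEV_BUSY	0

static int my_bit_wait(void *word)
{
	schedule();		/* woken by wake_up_bit() on this word/bit */
	return 0;		/* 0 means keep waiting until the bit clears */
}

static int my_dev_lock(struct my_dev *dev)
{
	return wait_on_bit_lock(&dev->flags, MY_DEV_BUSY,
				my_bit_wait, TASK_UNINTERRUPTIBLE);
}

static void my_dev_unlock(struct my_dev *dev)
{
	clear_bit(MY_DEV_BUSY, &dev->flags);
	smp_mb__after_clear_bit();		/* order the clear before waking */
	wake_up_bit(&dev->flags, MY_DEV_BUSY);
}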

#endif