Linux Kernel  3.7.1
rcupdate.h
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright IBM Corporation, 2001
19  *
20  * Author: Dipankar Sarma <[email protected]>
21  *
22  * Based on the original work by Paul McKenney <[email protected]>
23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24  * Papers:
25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27  *
28  * For detailed explanation of Read-Copy Update mechanism see -
29  * http://lse.sourceforge.net/locking/rcupdate.html
30  *
31  */
32 
33 #ifndef __LINUX_RCUPDATE_H
34 #define __LINUX_RCUPDATE_H
35 
36 #include <linux/types.h>
37 #include <linux/cache.h>
38 #include <linux/spinlock.h>
39 #include <linux/threads.h>
40 #include <linux/cpumask.h>
41 #include <linux/seqlock.h>
42 #include <linux/lockdep.h>
43 #include <linux/completion.h>
44 #include <linux/debugobjects.h>
45 #include <linux/bug.h>
46 #include <linux/compiler.h>
47 
48 #ifdef CONFIG_RCU_TORTURE_TEST
49 extern int rcutorture_runnable; /* for sysctl */
50 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
51 
52 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
53 extern void rcutorture_record_test_transition(void);
54 extern void rcutorture_record_progress(unsigned long vernum);
55 extern void do_trace_rcu_torture_read(char *rcutorturename,
56  struct rcu_head *rhp);
57 #else
58 static inline void rcutorture_record_test_transition(void)
59 {
60 }
61 static inline void rcutorture_record_progress(unsigned long vernum)
62 {
63 }
64 #ifdef CONFIG_RCU_TRACE
65 extern void do_trace_rcu_torture_read(char *rcutorturename,
66  struct rcu_head *rhp);
67 #else
68 #define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
69 #endif
70 #endif
71 
72 #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
73 #define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b))
74 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
75 #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
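These comparison helpers treat unsigned counters (for example grace-period numbers) as free-running values that may wrap: the unsigned subtraction (a) - (b) lands in the lower half of the value space when a is at or ahead of b by less than half the counter range, and in the upper half when a trails b. A minimal hedged illustration (the function and variable names are hypothetical):

static void ulong_cmp_example(void)
{
	unsigned long completed = ULONG_MAX;	/* counter just about to wrap */
	unsigned long snap = completed + 2;	/* wraps around to 1 */

	/* Despite the wrap, the newer value still compares as being ahead: */
	WARN_ON(!ULONG_CMP_GE(snap, completed));
	WARN_ON(!ULONG_CMP_LT(completed, snap));
}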
76 
77 /* Exported common interfaces */
78 
79 #ifdef CONFIG_PREEMPT_RCU
80 
94 extern void call_rcu(struct rcu_head *head,
95  void (*func)(struct rcu_head *head));
96 
97 #else /* #ifdef CONFIG_PREEMPT_RCU */
98 
99 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
100 #define call_rcu call_rcu_sched
101 
102 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
103 
122 extern void call_rcu_bh(struct rcu_head *head,
123  void (*func)(struct rcu_head *head));
124 
141 extern void call_rcu_sched(struct rcu_head *head,
142  void (*func)(struct rcu_head *rcu));
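Each of call_rcu(), call_rcu_bh(), and call_rcu_sched() queues a callback to be invoked after the corresponding flavor's grace period. A minimal hedged sketch of the usual deferred-free pattern (structure and function names are hypothetical; kfree() and container_of() come from <linux/slab.h> and <linux/kernel.h>):

struct foo {				/* hypothetical RCU-protected element */
	int a;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rcu);

	kfree(fp);
}

static void foo_remove(struct foo *fp)
{
	/* ... first unlink fp so that no new reader can find it ... */
	call_rcu(&fp->rcu, foo_reclaim);	/* fp is freed only after a grace period */
}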
143 
144 extern void synchronize_sched(void);
145 
146 #ifdef CONFIG_PREEMPT_RCU
147 
148 extern void __rcu_read_lock(void);
149 extern void __rcu_read_unlock(void);
150 extern void rcu_read_unlock_special(struct task_struct *t);
151 void synchronize_rcu(void);
152 
153 /*
154  * Defined as a macro as it is a very low level header included from
155  * areas that don't even know about current. This gives the rcu_read_lock()
156  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
157  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
158  */
159 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
160 
161 #else /* #ifdef CONFIG_PREEMPT_RCU */
162 
163 static inline void __rcu_read_lock(void)
164 {
165  preempt_disable();
166 }
167 
168 static inline void __rcu_read_unlock(void)
169 {
170  preempt_enable();
171 }
172 
173 static inline void synchronize_rcu(void)
174 {
175  synchronize_sched();
176 }
177 
178 static inline int rcu_preempt_depth(void)
179 {
180  return 0;
181 }
182 
183 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
184 
185 /* Internal to kernel */
186 extern void rcu_sched_qs(int cpu);
187 extern void rcu_bh_qs(int cpu);
188 extern void rcu_check_callbacks(int cpu, int user);
189 struct notifier_block;
190 extern void rcu_idle_enter(void);
191 extern void rcu_idle_exit(void);
192 extern void rcu_irq_enter(void);
193 extern void rcu_irq_exit(void);
194 
195 #ifdef CONFIG_RCU_USER_QS
196 extern void rcu_user_enter(void);
197 extern void rcu_user_exit(void);
198 extern void rcu_user_enter_after_irq(void);
199 extern void rcu_user_exit_after_irq(void);
200 extern void rcu_user_hooks_switch(struct task_struct *prev,
201  struct task_struct *next);
202 #else
203 static inline void rcu_user_enter(void) { }
204 static inline void rcu_user_exit(void) { }
205 static inline void rcu_user_enter_after_irq(void) { }
206 static inline void rcu_user_exit_after_irq(void) { }
207 #endif /* CONFIG_RCU_USER_QS */
208 
209 extern void exit_rcu(void);
210 
229 #define RCU_NONIDLE(a) \
230  do { \
231  rcu_irq_enter(); \
232  do { a; } while (0); \
233  rcu_irq_exit(); \
234  } while (0)
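RCU_NONIDLE() brackets its statement with rcu_irq_enter()/rcu_irq_exit() so that RCU read-side code may be used from the idle loop, where RCU is otherwise not watching. A minimal hedged sketch (the tracing statement is only an example):

	/* Somewhere in idle-loop code, where RCU would normally be idle: */
	RCU_NONIDLE(trace_printk("entering low-power state\n"));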
235 
236 /*
237  * Infrastructure to implement the synchronize_() primitives in
238  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
239  */
240 
241 typedef void call_rcu_func_t(struct rcu_head *head,
242  void (*func)(struct rcu_head *head));
243 void wait_rcu_gp(call_rcu_func_t crf);
244 
245 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
246 #include <linux/rcutree.h>
247 #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
248 #include <linux/rcutiny.h>
249 #else
250 #error "Unknown RCU implementation specified to kernel configuration"
251 #endif
252 
253 /*
254  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
255  * initialization and destruction of rcu_head on the stack. rcu_head structures
256  * allocated dynamically in the heap or defined statically don't need any
257  * initialization.
258  */
259 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
260 extern void init_rcu_head_on_stack(struct rcu_head *head);
261 extern void destroy_rcu_head_on_stack(struct rcu_head *head);
262 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
263 static inline void init_rcu_head_on_stack(struct rcu_head *head)
264 {
265 }
266 
267 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
268 {
269 }
270 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
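The on-stack init/destroy calls matter only for rcu_head structures with automatic storage duration, for example when open-coding a wait for a grace period. A hedged sketch of such a caller (all names are hypothetical; this roughly mirrors what wait_rcu_gp() does internally):

struct stack_gp_waiter {
	struct rcu_head head;
	struct completion done;
};

static void stack_gp_done(struct rcu_head *rhp)
{
	struct stack_gp_waiter *w = container_of(rhp, struct stack_gp_waiter, head);

	complete(&w->done);
}

static void stack_gp_wait(void)
{
	struct stack_gp_waiter w;

	init_completion(&w.done);
	init_rcu_head_on_stack(&w.head);	/* debug-objects bookkeeping for the on-stack head */
	call_rcu(&w.head, stack_gp_done);
	wait_for_completion(&w.done);
	destroy_rcu_head_on_stack(&w.head);
}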
271 
272 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
273 extern int rcu_is_cpu_idle(void);
274 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
275 
276 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
277 bool rcu_lockdep_current_cpu_online(void);
278 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
279 static inline bool rcu_lockdep_current_cpu_online(void)
280 {
281  return 1;
282 }
283 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
284 
285 #ifdef CONFIG_DEBUG_LOCK_ALLOC
286 
287 static inline void rcu_lock_acquire(struct lockdep_map *map)
288 {
289  lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
290 }
291 
292 static inline void rcu_lock_release(struct lockdep_map *map)
293 {
294  lock_release(map, 1, _THIS_IP_);
295 }
296 
297 extern struct lockdep_map rcu_lock_map;
298 extern struct lockdep_map rcu_bh_lock_map;
299 extern struct lockdep_map rcu_sched_lock_map;
300 extern int debug_lockdep_rcu_enabled(void);
301 
322 static inline int rcu_read_lock_held(void)
323 {
324  if (!debug_lockdep_rcu_enabled())
325  return 1;
326  if (rcu_is_cpu_idle())
327  return 0;
328  if (!rcu_lockdep_current_cpu_online())
329  return 0;
330  return lock_is_held(&rcu_lock_map);
331 }
332 
333 /*
334  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
335  * hell.
336  */
337 extern int rcu_read_lock_bh_held(void);
338 
370 #ifdef CONFIG_PREEMPT_COUNT
371 static inline int rcu_read_lock_sched_held(void)
372 {
373  int lockdep_opinion = 0;
374 
375  if (!debug_lockdep_rcu_enabled())
376  return 1;
377  if (rcu_is_cpu_idle())
378  return 0;
379  if (!rcu_lockdep_current_cpu_online())
380  return 0;
381  if (debug_locks)
382  lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
383  return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
384 }
385 #else /* #ifdef CONFIG_PREEMPT_COUNT */
386 static inline int rcu_read_lock_sched_held(void)
387 {
388  return 1;
389 }
390 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
391 
392 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
393 
394 # define rcu_lock_acquire(a) do { } while (0)
395 # define rcu_lock_release(a) do { } while (0)
396 
397 static inline int rcu_read_lock_held(void)
398 {
399  return 1;
400 }
401 
402 static inline int rcu_read_lock_bh_held(void)
403 {
404  return 1;
405 }
406 
407 #ifdef CONFIG_PREEMPT_COUNT
408 static inline int rcu_read_lock_sched_held(void)
409 {
410  return preempt_count() != 0 || irqs_disabled();
411 }
412 #else /* #ifdef CONFIG_PREEMPT_COUNT */
413 static inline int rcu_read_lock_sched_held(void)
414 {
415  return 1;
416 }
417 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
418 
419 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
420 
421 #ifdef CONFIG_PROVE_RCU
422 
423 extern int rcu_my_thread_group_empty(void);
424 
430 #define rcu_lockdep_assert(c, s) \
431  do { \
432  static bool __section(.data.unlikely) __warned; \
433  if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
434  __warned = true; \
435  lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
436  } \
437  } while (0)
438 
439 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
440 static inline void rcu_preempt_sleep_check(void)
441 {
442  rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
443  "Illegal context switch in RCU read-side critical section");
444 }
445 #else /* #ifdef CONFIG_PROVE_RCU */
446 static inline void rcu_preempt_sleep_check(void)
447 {
448 }
449 #endif /* #else #ifdef CONFIG_PROVE_RCU */
450 
451 #define rcu_sleep_check() \
452  do { \
453  rcu_preempt_sleep_check(); \
454  rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
455  "Illegal context switch in RCU-bh" \
456  " read-side critical section"); \
457  rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
458  "Illegal context switch in RCU-sched"\
459  " read-side critical section"); \
460  } while (0)
461 
462 #else /* #ifdef CONFIG_PROVE_RCU */
463 
464 #define rcu_lockdep_assert(c, s) do { } while (0)
465 #define rcu_sleep_check() do { } while (0)
466 
467 #endif /* #else #ifdef CONFIG_PROVE_RCU */
468 
469 /*
470  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
471  * and rcu_assign_pointer(). Some of these could be folded into their
472  * callers, but they are left separate in order to ease introduction of
473  * multiple flavors of pointers to match the multiple flavors of RCU
474  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
475  * the future.
476  */
477 
478 #ifdef __CHECKER__
479 #define rcu_dereference_sparse(p, space) \
480  ((void)(((typeof(*p) space *)p) == p))
481 #else /* #ifdef __CHECKER__ */
482 #define rcu_dereference_sparse(p, space)
483 #endif /* #else #ifdef __CHECKER__ */
484 
485 #define __rcu_access_pointer(p, space) \
486  ({ \
487  typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
488  rcu_dereference_sparse(p, space); \
489  ((typeof(*p) __force __kernel *)(_________p1)); \
490  })
491 #define __rcu_dereference_check(p, c, space) \
492  ({ \
493  typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
494  rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
495  " usage"); \
496  rcu_dereference_sparse(p, space); \
497  smp_read_barrier_depends(); \
498  ((typeof(*p) __force __kernel *)(_________p1)); \
499  })
500 #define __rcu_dereference_protected(p, c, space) \
501  ({ \
502  rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
503  " usage"); \
504  rcu_dereference_sparse(p, space); \
505  ((typeof(*p) __force __kernel *)(p)); \
506  })
507 
508 #define __rcu_access_index(p, space) \
509  ({ \
510  typeof(p) _________p1 = ACCESS_ONCE(p); \
511  rcu_dereference_sparse(p, space); \
512  (_________p1); \
513  })
514 #define __rcu_dereference_index_check(p, c) \
515  ({ \
516  typeof(p) _________p1 = ACCESS_ONCE(p); \
517  rcu_lockdep_assert(c, \
518  "suspicious rcu_dereference_index_check()" \
519  " usage"); \
520  smp_read_barrier_depends(); \
521  (_________p1); \
522  })
523 #define __rcu_assign_pointer(p, v, space) \
524  do { \
525  smp_wmb(); \
526  (p) = (typeof(*v) __force space *)(v); \
527  } while (0)
528 
529 
549 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
550 
584 #define rcu_dereference_check(p, c) \
585  __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
586 
594 #define rcu_dereference_bh_check(p, c) \
595  __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
596 
604 #define rcu_dereference_sched_check(p, c) \
605  __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
606  __rcu)
607 
608 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
609 
622 #define rcu_access_index(p) __rcu_access_index((p), __rcu)
623 
642 #define rcu_dereference_index_check(p, c) \
643  __rcu_dereference_index_check((p), (c))
644 
662 #define rcu_dereference_protected(p, c) \
663  __rcu_dereference_protected((p), (c), __rcu)
664 
665 
672 #define rcu_dereference(p) rcu_dereference_check(p, 0)
673 
680 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
681 
688 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
689 
732 static inline void rcu_read_lock(void)
733 {
734  __rcu_read_lock();
735  __acquire(RCU);
736  rcu_lock_acquire(&rcu_lock_map);
738  "rcu_read_lock() used illegally while idle");
739 }
740 
741 /*
742  * So where is rcu_write_lock()? It does not exist, as there is no
743  * way for writers to lock out RCU readers. This is a feature, not
744  * a bug -- this property is what provides RCU's performance benefits.
745  * Of course, writers must coordinate with each other. The normal
746  * spinlock primitives work well for this, but any other technique may be
747  * used as well. RCU does not care how the writers keep out of each
748  * others' way, as long as they do so.
749  */
750 
756 static inline void rcu_read_unlock(void)
757 {
759  "rcu_read_unlock() used illegally while idle");
760  rcu_lock_release(&rcu_lock_map);
761  __release(RCU);
762  __rcu_read_unlock();
763 }
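Combining the reader primitives with the updater-side coordination described in the comment above gives the usual publish/read/reclaim pattern. A minimal hedged sketch (structure, lock, and function names are hypothetical; allocation-failure handling is omitted, and kmalloc()/kfree() come from <linux/slab.h>):

struct foo {				/* hypothetical RCU-protected structure */
	int a;
};
static struct foo __rcu *global_foo;
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only; readers take no lock */

static int foo_read_a(void)
{
	int a;

	rcu_read_lock();
	a = rcu_dereference(global_foo)->a;	/* legal only inside the critical section */
	rcu_read_unlock();
	return a;
}

static void foo_update_a(int a)
{
	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp;

	newp->a = a;
	spin_lock(&foo_lock);
	oldp = rcu_dereference_protected(global_foo,
					 lockdep_is_held(&foo_lock));
	rcu_assign_pointer(global_foo, newp);	/* publish the new version */
	spin_unlock(&foo_lock);
	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(oldp);
}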
764 
782 static inline void rcu_read_lock_bh(void)
783 {
784  local_bh_disable();
785  __acquire(RCU_BH);
786  rcu_lock_acquire(&rcu_bh_lock_map);
787  rcu_lockdep_assert(!rcu_is_cpu_idle(),
788  "rcu_read_lock_bh() used illegally while idle");
789 }
790 
791 /*
792  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
793  *
794  * See rcu_read_lock_bh() for more information.
795  */
796 static inline void rcu_read_unlock_bh(void)
797 {
799  "rcu_read_unlock_bh() used illegally while idle");
800  rcu_lock_release(&rcu_bh_lock_map);
801  __release(RCU_BH);
802  local_bh_enable();
803 }
804 
818 static inline void rcu_read_lock_sched(void)
819 {
820  preempt_disable();
821  __acquire(RCU_SCHED);
822  rcu_lock_acquire(&rcu_sched_lock_map);
824  "rcu_read_lock_sched() used illegally while idle");
825 }
826 
827 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
828 static inline notrace void rcu_read_lock_sched_notrace(void)
829 {
830  preempt_disable_notrace();
831  __acquire(RCU_SCHED);
832 }
833 
834 /*
835  * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
836  *
837  * See rcu_read_lock_sched for more information.
838  */
839 static inline void rcu_read_unlock_sched(void)
840 {
842  "rcu_read_unlock_sched() used illegally while idle");
843  rcu_lock_release(&rcu_sched_lock_map);
844  __release(RCU_SCHED);
845  preempt_enable();
846 }
847 
848 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
849 static inline notrace void rcu_read_unlock_sched_notrace(void)
850 {
851  __release(RCU_SCHED);
852  preempt_enable_notrace();
853 }
854 
878 #define rcu_assign_pointer(p, v) \
879  __rcu_assign_pointer((p), (v), __rcu)
880 
913 #define RCU_INIT_POINTER(p, v) \
914  do { \
915  p = (typeof(*v) __force __rcu *)(v); \
916  } while (0)
917 
923 #define RCU_POINTER_INITIALIZER(p, v) \
924  .p = (typeof(*v) __force __rcu *)(v)
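Both RCU_INIT_POINTER() and RCU_POINTER_INITIALIZER() are for situations in which readers cannot yet (or can no longer) see the pointer, so the ordering provided by rcu_assign_pointer() is not needed. A small hedged sketch (types and names are hypothetical):

struct bar {				/* hypothetical pointed-to type */
	int b;
};
struct bar_holder {
	struct bar __rcu *bp;
};
static struct bar default_bar;
static struct bar_holder bar_holder = {
	RCU_POINTER_INITIALIZER(bp, &default_bar),	/* static (compile-time) initialization */
};

static void bar_holder_clear(struct bar_holder *h)
{
	RCU_INIT_POINTER(h->bp, NULL);	/* NULLing a pointer needs no ordering */
}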
925 
926 /*
927  * Does the specified offset indicate that the corresponding rcu_head
928  * structure can be handled by kfree_rcu()?
929  */
930 #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
931 
932 /*
933  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
934  */
935 #define __kfree_rcu(head, offset) \
936  do { \
937  BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
938  kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
939  } while (0)
940 
967 #define kfree_rcu(ptr, rcu_head) \
968  __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
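kfree_rcu() covers the common case in which a call_rcu() callback would do nothing but kfree() its enclosing structure (as in the call_rcu() sketch earlier in this file); the second argument is the name of the structure's rcu_head member. With the hypothetical struct foo from that sketch, the remove function collapses to:

static void foo_remove(struct foo *fp)
{
	/* ... unlink fp as before ... */
	kfree_rcu(fp, rcu);		/* freed after a grace period, no callback needed */
}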
969 
970 #endif /* __LINUX_RCUPDATE_H */