rcutiny.h (Linux Kernel 3.7.1)
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>
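/*
 * Tiny RCU needs no boot-time setup of its own, so rcu_init() is a no-op
 * in this configuration.
 */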
static inline void rcu_init(void)
{
}
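/*
 * The rcu_barrier_*() functions wait until all callbacks queued so far
 * have been invoked: wait_rcu_gp() posts a callback through the given
 * call_rcu*() variant and waits for that callback to run.
 */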
static inline void rcu_barrier_bh(void)
{
        wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
        wait_rcu_gp(call_rcu_sched);
}
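/*
 * CONFIG_TINY_RCU selects the non-preemptible uniprocessor implementation;
 * the #else branches below cover the preemptible Tiny RCU variant
 * (CONFIG_TINY_PREEMPT_RCU), which provides its own expedited grace period.
 */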
#ifdef CONFIG_TINY_RCU

static inline void synchronize_rcu_expedited(void)
{
        synchronize_sched();    /* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
        rcu_barrier_sched();    /* Only one CPU, so only one list of callbacks! */
}
#else /* #ifdef CONFIG_TINY_RCU */

void synchronize_rcu_expedited(void);

static inline void rcu_barrier(void)
{
        wait_rcu_gp(call_rcu);
}

#endif /* #else #ifdef CONFIG_TINY_RCU */
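/*
 * With only one CPU, a bh or sched grace period requires nothing beyond
 * what synchronize_sched() already provides, so the remaining
 * synchronize_*() variants simply map onto it.
 */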
static inline void synchronize_rcu_bh(void)
{
        synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
        synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
        synchronize_sched();
}
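/*
 * Tiny RCU keeps a single list of callbacks, so kfree_call_rcu() can
 * simply queue its callback with call_rcu().
 */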
static inline void kfree_call_rcu(struct rcu_head *head,
                                  void (*func)(struct rcu_head *rcu))
{
        call_rcu(head, func);
}
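/*
 * rcu_needs_cpu() tells the idle-entry code whether RCU still needs this
 * CPU.  Returning zero with *delta_jiffies set to ULONG_MAX means the CPU
 * may sleep indefinitely as far as RCU is concerned.
 */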
#ifdef CONFIG_TINY_RCU

static inline void rcu_preempt_note_context_switch(void)
{
}

static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
        *delta_jiffies = ULONG_MAX;
        return 0;
}
#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
int rcu_preempt_needs_cpu(void);

static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
        *delta_jiffies = ULONG_MAX;
        return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */
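/*
 * A context switch is a quiescent state for RCU-sched, so report it here.
 */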
static inline void rcu_note_context_switch(int cpu)
{
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch();
}
/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.
 */
static inline long rcu_batches_completed(void)
{
        return 0;
}

/*
 * Return the number of bottom-half grace periods.
 */
static inline long rcu_batches_completed_bh(void)
{
        return 0;
}
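/*
 * Forcing a quiescent state means prodding other CPUs, and there are no
 * other CPUs here, so all three force_quiescent_state() variants are
 * no-ops.
 */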
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}
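/*
 * rcu_cpu_stall_reset() exists to match the Tree RCU API; there is
 * nothing for this implementation to reset here.
 */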
static inline void rcu_cpu_stall_reset(void)
{
}
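/*
 * rcu_scheduler_active lets RCU's lockdep-based debug checks know once the
 * scheduler is running; without CONFIG_DEBUG_LOCK_ALLOC nothing needs to
 * track this.
 */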
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */