seqlock.h (Linux Kernel 3.7.1)
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *              seq = read_seqbegin(&foo);
 *              ...
 *      } while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
        { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define seqlock_init(x) \
        do { \
                (x)->sequence = 0; \
                spin_lock_init(&(x)->lock); \
        } while (0)

#define DEFINE_SEQLOCK(x) \
        seqlock_t x = __SEQLOCK_UNLOCKED(x)

/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}
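
/*
 * Example writer usage (illustrative sketch only, not part of this header;
 * the names foo_lock, foo_a and foo_b are hypothetical):
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *      static u64 foo_a, foo_b;
 *
 *      static void foo_update(u64 a, u64 b)
 *      {
 *              write_seqlock(&foo_lock);       // excludes other writers
 *              foo_a = a;                      // readers may run concurrently,
 *              foo_b = b;                      // but will detect this update
 *              write_sequnlock(&foo_lock);
 *      }
 */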

static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
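
/*
 * Example (sketch): write_tryseqlock() lets a caller skip the update rather
 * than spin when another writer already holds the lock; foo_lock and foo_a
 * are the hypothetical names from the example above.
 *
 *      if (write_tryseqlock(&foo_lock)) {
 *              foo_a++;
 *              write_sequnlock(&foo_lock);
 *      }
 */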

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(sl->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();

        return ret;
}

/*
 * Test if reader processed invalid data.
 *
 * If sequence value changed then writer changed data while in section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return unlikely(sl->sequence != start);
}
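
/*
 * Example reader usage (sketch; foo_lock, foo_a and foo_b as above):
 *
 *      u64 a, b;
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin(&foo_lock);
 *              a = foo_a;              // snapshot the protected data
 *              b = foo_b;
 *      } while (read_seqretry(&foo_lock, seq));        // retry if a writer ran
 */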


/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
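
/*
 * Example seqcount usage (sketch; foo_seq, foo_mutex, foo_a and foo_b are
 * hypothetical, and the mutex stands in for the caller's own serialization):
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO;
 *      static DEFINE_MUTEX(foo_mutex);
 *
 *      // writer: serialized by foo_mutex, marked by the seqcount
 *      mutex_lock(&foo_mutex);
 *      write_seqcount_begin(&foo_seq);
 *      foo_a = a;
 *      foo_b = b;
 *      write_seqcount_end(&foo_seq);
 *      mutex_unlock(&foo_mutex);
 *
 *      // reader: lockless, retries if a write was in flight
 *      do {
 *              seq = read_seqcount_begin(&foo_seq);
 *              a = foo_a;
 *              b = foo_b;
 *      } while (read_seqcount_retry(&foo_seq, seq));
 */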
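
/*
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 *
 * Like read_seqcount_begin(), but with no smp_rmb(); the caller must provide
 * its own ordering (e.g. smp_rmb() or an equivalent barrier) before actually
 * loading any of the protected variables.
 */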
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

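/*
 * read_seqcount_begin - begin a seq-read critical section
 *
 * Returns a count to be passed later to read_seqcount_retry(); spins until
 * no write is in progress (i.e. the count is even).
 */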
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

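/*
 * raw_seqcount_begin - begin a seq-read critical section without spinning
 *
 * Unlike read_seqcount_begin(), this does not wait for the count to
 * stabilize: clearing the low bit means that if a write was in progress,
 * the matching read_seqcount_retry() is guaranteed to fail and the caller
 * will retry.
 */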
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}

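/*
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 *
 * Like read_seqcount_retry(), but with no smp_rmb(); the caller must provide
 * its own ordering after the loads of the protected variables.
 */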
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

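/*
 * read_seqcount_retry - end a seq-read critical section
 *
 * Returns non-zero if the critical section must be retried because a writer
 * changed the sequence since read_seqcount_begin() returned @start.
 */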
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return __read_seqcount_retry(s, start);
}


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

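/*
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this: bumping the count by two keeps
 * it even (no writer appears to be in progress) while still forcing any
 * concurrent reader's retry check to fail.
 */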
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags) \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
        do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
        do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags) \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock) \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock) \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags) \
        ({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags) \
        ({ \
                int ret = read_seqretry(lock, iv); \
                local_irq_restore(flags); \
                ret; \
        })
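
/*
 * Example (sketch): when the data is also updated from hard-IRQ context, a
 * process-context writer must block interrupts around the write section so
 * that an interrupting writer cannot deadlock against it. foo_lock and
 * foo_a are the hypothetical names from the examples above.
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo_lock, flags);
 *      foo_a++;
 *      write_sequnlock_irqrestore(&foo_lock, flags);
 */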

#endif /* __LINUX_SEQLOCK_H */