Linux Kernel 3.7.1
srcu.c
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Paul McKenney <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}

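/*
 * For reference (not part of this file): the rcu_batch structure that the
 * helpers above manipulate is, in this kernel series, roughly a singly
 * linked list of rcu_head callbacks plus a tail pointer-to-pointer,
 * declared in include/linux/srcu.h along the lines of:
 *
 *	struct rcu_batch {
 *		struct rcu_head *head, **tail;
 *	};
 *
 * An empty batch has ->tail pointing at ->head, which is exactly what
 * rcu_batch_init() establishes and rcu_batch_empty() tests for, and the
 * tail pointer-to-pointer lets rcu_batch_queue() append in O(1).
 */
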
/* single-thread state-machine */
static void process_srcu(struct work_struct *work);

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

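/*
 * For reference (not part of this file): the per-CPU element summed by the
 * two functions above is, in this kernel series, declared in
 * include/linux/srcu.h roughly as:
 *
 *	struct srcu_struct_array {
 *		unsigned long c[2];
 *		unsigned long seq[2];
 *	};
 *
 * The two slots of each array are the two "ranks" selected by the low-order
 * bit of ->completed: ->c[idx] counts srcu_read_lock() minus
 * srcu_read_unlock() calls for that rank, while ->seq[idx] counts only the
 * lock acquisitions and is used by the validation step below.
 */
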
/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int sum;

	sum = srcu_readers_active(sp);
	WARN_ON(sum); /* Leakage unless caller handles error. */
	if (sum != 0)
		return;
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

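/*
 * Illustrative usage (not part of this file): a minimal sketch of the
 * init/cleanup lifecycle, assuming a hypothetical driver-owned srcu_struct
 * named my_srcu; all names here are made up for illustration.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 *
 * All pre-existing readers and queued callbacks must have completed before
 * cleanup_srcu_struct() is called, as the WARN_ON() above enforces.
 */
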
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	preempt_disable();
	idx = rcu_dereference_index_check(sp->completed,
					  rcu_read_lock_sched_held()) & 0x1;
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	preempt_disable();
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

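/*
 * Illustrative usage (not part of this file): __srcu_read_lock() and
 * __srcu_read_unlock() are normally reached through the srcu_read_lock()
 * and srcu_read_unlock() wrappers in include/linux/srcu.h.  A minimal
 * read-side sketch, assuming a hypothetical srcu_struct my_srcu and an
 * SRCU-protected pointer my_ptr (the remaining names are likewise made up):
 *
 *	int idx;
 *	struct my_data *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index tells srcu_read_unlock() which rank of ->c[] to
 * decrement, so it must be passed back unmodified.
 */
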
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * @@@ Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".
 * The caller should ensure that ->completed is not changed while checking,
 * and that idx = (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		schedule_delayed_work(&sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);

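/*
 * Illustrative usage (not part of this file): the usual pattern is to embed
 * an rcu_head in the protected object and free the object from the callback
 * once the SRCU grace period has elapsed.  A minimal sketch with made-up
 * names (my_srcu, struct my_data, my_free_cb):
 *
 *	struct my_data {
 *		...
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct my_data, rcu));
 *	}
 *
 *	call_srcu(&my_srcu, &old->rcu, my_free_cb);
 *
 * Unlike synchronize_srcu(), call_srcu() does not block; it only queues the
 * callback and kicks the grace-period state machine if it is idle.
 */
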
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for all pre-existing SRCU read-side critical sections on @sp to
 * complete.  Can block; must be called from process context.  It is illegal
 * to call synchronize_srcu() from the corresponding SRCU read-side critical
 * section; doing so will result in deadlock.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

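/*
 * Illustrative usage (not part of this file): a typical updater unpublishes
 * an object, waits for an SRCU grace period, and only then frees it.  A
 * minimal sketch with made-up names (my_srcu, my_ptr, my_lock):
 *
 *	struct my_data *old;
 *
 *	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_ptr, new);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 *
 * After synchronize_srcu() returns, no reader that could have obtained "old"
 * via srcu_dereference() can still be inside its read-side critical section.
 */
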
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

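/*
 * SRCU_CALLBACK_BATCH above bounds how many ->batch_done callbacks
 * srcu_invoke_callbacks() runs per workqueue pass, and SRCU_INTERVAL is the
 * delay, in jiffies, used by srcu_reschedule() when re-arming the work.
 */
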
/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 already went through their first
	 * zero check and the counter flip, back when they were enqueued on
	 * ->batch_check0 in a previous invocation of srcu_advance_batches().
	 * (Presumably try_check_zero() returned false during that
	 * invocation, leaving the callbacks stranded on ->batch_check1.)
	 * They are therefore ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their
	 * first zero check and flip, so move them to ->batch_check1
	 * for future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
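
/*
 * Overview: each pass of process_srcu() moves callbacks through the
 * grace-period pipeline
 *
 *	call_srcu() -> ->batch_queue -> ->batch_check0 -> ->batch_check1
 *		    -> ->batch_done -> srcu_invoke_callbacks()
 *
 * The two check stages wait for pre-existing readers on each of the two
 * counter ranks, with srcu_flip() switching ranks in between, and
 * srcu_reschedule() re-arms the delayed work until every batch is empty.
 */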