Linux Kernel 3.7.1
notifier.c
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */
BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

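/*
 * Example (illustrative sketch, not part of notifier.c): a driver that
 * wants a callback at shutdown can hang a notifier_block on
 * reboot_notifier_list via the register_reboot_notifier() wrapper from
 * <linux/reboot.h>. The callback name and the quiesce step are
 * hypothetical.
 */
static int example_reboot_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        /* action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
        /* quiesce hypothetical DMA activity here */
        return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
        .notifier_call = example_reboot_notify,
};

/* somewhere in module init: register_reboot_notifier(&example_reboot_nb); */
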
/*
 * Notifier chain core routines. The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
                nl = &((*nl)->next);
        }
        n->next = *nl;
        rcu_assign_pointer(*nl, n);
        return 0;
}

static int notifier_chain_cond_register(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if ((*nl) == n)
                        return 0;
                if (n->priority > (*nl)->priority)
                        break;
                nl = &((*nl)->next);
        }
        n->next = *nl;
        rcu_assign_pointer(*nl, n);
        return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if ((*nl) == n) {
                        rcu_assign_pointer(*nl, n->next);
                        return 0;
                }
                nl = &((*nl)->next);
        }
        return -ENOENT;
}

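/*
 * Example (illustrative, not part of notifier.c): chains are singly
 * linked lists kept sorted by descending ->priority, so a block with a
 * higher priority is called first regardless of registration order.
 * Blocks with equal priority run in registration order, because the
 * insertion loop above uses a strict '>' comparison. The callback and
 * block names below are hypothetical.
 */
static int example_cb(struct notifier_block *nb, unsigned long val, void *v)
{
        return NOTIFY_OK;
}

static struct notifier_block example_late = {
        .notifier_call = example_cb,
        .priority      = 0,     /* default: called after higher priorities */
};

static struct notifier_block example_early = {
        .notifier_call = example_cb,
        .priority      = 100,   /* called before priority-0 blocks */
};
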
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Don't care
 *			value of this parameter is -1.
 *	@nr_calls:	Records the number of notifications sent. Don't care
 *			value of this field is NULL.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
                                        unsigned long val, void *v,
                                        int nr_to_call, int *nr_calls)
{
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;

        nb = rcu_dereference_raw(*nl);

        while (nb && nr_to_call) {
                next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
                if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
                        WARN(1, "Invalid notifier called!");
                        nb = next_nb;
                        continue;
                }
#endif
                ret = nb->notifier_call(nb, val, v);

                if (nr_calls)
                        (*nr_calls)++;

                if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
                        break;
                nb = next_nb;
                nr_to_call--;
        }
        return ret;
}

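/*
 * Example (illustrative, not part of notifier.c): a callback stops
 * further traversal of the chain by returning NOTIFY_STOP, or NOTIFY_BAD
 * to also report failure; both have NOTIFY_STOP_MASK set. Returning
 * NOTIFY_OK or NOTIFY_DONE lets notifier_call_chain() continue with the
 * next block. The event value checked here is hypothetical.
 */
static int example_veto_cb(struct notifier_block *nb,
                           unsigned long event, void *data)
{
        if (event == 0x1)       /* hypothetical event this callback vetoes */
                return NOTIFY_BAD;      /* NOTIFY_STOP_MASK set: chain stops */
        return NOTIFY_OK;               /* continue down the chain */
}
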
/*
 * Atomic notifier chain routines. Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/*
 * atomic_notifier_chain_register - add a notifier to an atomic chain
 */
int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_register(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/*
 * atomic_notifier_chain_unregister - remove a notifier from an atomic chain
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_unregister(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        synchronize_rcu();
        return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/*
 * __atomic_notifier_call_chain - call the functions in an atomic chain,
 * limited to nr_to_call entries; must not block.
 */
int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
                                        unsigned long val, void *v,
                                        int nr_to_call, int *nr_calls)
{
        int ret;

        rcu_read_lock();
        ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
                unsigned long val, void *v)
{
        return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);

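/*
 * Example (illustrative, not part of notifier.c): an atomic chain may be
 * notified from atomic context such as an interrupt handler, so its
 * callbacks must not sleep. The chain and names below are hypothetical.
 */
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);

static int example_atomic_cb(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        /* runs under rcu_read_lock(); must not block */
        return NOTIFY_OK;
}

static struct notifier_block example_atomic_nb = {
        .notifier_call = example_atomic_cb,
};

/*
 * Usage sketch:
 *      atomic_notifier_chain_register(&example_atomic_chain,
 *                                     &example_atomic_nb);
 *      atomic_notifier_call_chain(&example_atomic_chain, 0, NULL);
 */
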
/*
 * Blocking notifier chain routines. All access to the chain is
 * synchronized by an rwsem.
 */

/*
 * blocking_notifier_chain_register - add a notifier to a blocking chain;
 * must be called in process context.
 */
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_register(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/*
 * blocking_notifier_chain_cond_register - as above, but silently succeeds
 * if the notifier is already registered.
 */
int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        down_write(&nh->rwsem);
        ret = notifier_chain_cond_register(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register);

/*
 * blocking_notifier_chain_unregister - remove a notifier from a blocking
 * chain; must be called in process context.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_unregister(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/*
 * __blocking_notifier_call_chain - call the functions in a blocking chain,
 * limited to nr_to_call entries; must be called in process context.
 */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                                   unsigned long val, void *v,
                                   int nr_to_call, int *nr_calls)
{
        int ret = NOTIFY_DONE;

        /*
         * We check the head outside the lock, but if this access is
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
        if (rcu_dereference_raw(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                        nr_calls);
                up_read(&nh->rwsem);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                unsigned long val, void *v)
{
        return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

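/*
 * Example (illustrative, not part of notifier.c): blocking chains are for
 * process context only, so their callbacks are allowed to sleep, e.g. to
 * take a mutex or allocate with GFP_KERNEL. Names are hypothetical.
 */
static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);

static int example_blocking_cb(struct notifier_block *nb,
                               unsigned long event, void *data)
{
        /* may sleep: the chain is walked under a read-held rwsem */
        return NOTIFY_DONE;
}

static struct notifier_block example_blocking_nb = {
        .notifier_call = example_blocking_cb,
};

/*
 * Usage sketch (process context only):
 *      blocking_notifier_chain_register(&example_blocking_chain,
 *                                       &example_blocking_nb);
 *      blocking_notifier_call_chain(&example_blocking_chain, 0, NULL);
 */
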
/*
 * Raw notifier chain routines. There is no protection;
 * the caller must provide it. Use at your own risk!
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_register(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_unregister(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
                              unsigned long val, void *v,
                              int nr_to_call, int *nr_calls)
{
        return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}
EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
                unsigned long val, void *v)
{
        return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

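/*
 * Example (illustrative, not part of notifier.c): with a raw chain the
 * caller supplies all synchronization. Here a hypothetical subsystem
 * guards both the chain and its notifications with its own spinlock
 * (assuming <linux/spinlock.h>).
 */
static RAW_NOTIFIER_HEAD(example_raw_chain);
static DEFINE_SPINLOCK(example_raw_lock);

static void example_raw_notify(unsigned long event, void *data)
{
        unsigned long flags;

        spin_lock_irqsave(&example_raw_lock, flags);
        raw_notifier_call_chain(&example_raw_chain, event, data);
        spin_unlock_irqrestore(&example_raw_lock, flags);
}
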
/*
 * SRCU notifier chain routines. Registration and unregistration
 * use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call mutex_lock().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        mutex_lock(&nh->mutex);
        ret = notifier_chain_register(&nh->head, n);
        mutex_unlock(&nh->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call mutex_lock().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        mutex_lock(&nh->mutex);
        ret = notifier_chain_unregister(&nh->head, n);
        mutex_unlock(&nh->mutex);
        synchronize_srcu(&nh->srcu);
        return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
                               unsigned long val, void *v,
                               int nr_to_call, int *nr_calls)
{
        int ret;
        int idx;

        idx = srcu_read_lock(&nh->srcu);
        ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
        srcu_read_unlock(&nh->srcu, idx);
        return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
                unsigned long val, void *v)
{
        return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/*
 * srcu_init_notifier_head - initialize an SRCU notifier head. Unlike the
 * other sorts of notifier heads, SRCU notifier heads require dynamic
 * initialization; be sure to call this routine before using the chain.
 */
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
        mutex_init(&nh->mutex);
        if (init_srcu_struct(&nh->srcu) < 0)
                BUG();
        nh->head = NULL;
}
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);

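/*
 * Example (illustrative, not part of notifier.c): an SRCU chain must be
 * initialized at runtime before use. The head and callback below are
 * hypothetical.
 */
static struct srcu_notifier_head example_srcu_chain;

static int example_srcu_cb(struct notifier_block *nb,
                           unsigned long event, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block example_srcu_nb = {
        .notifier_call = example_srcu_cb,
};

/*
 * Usage sketch:
 *      srcu_init_notifier_head(&example_srcu_chain);
 *      srcu_notifier_chain_register(&example_srcu_chain, &example_srcu_nb);
 *      srcu_notifier_call_chain(&example_srcu_chain, 0, NULL);
 */
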
static ATOMIC_NOTIFIER_HEAD(die_chain);

int notrace __kprobes notify_die(enum die_val val, const char *str,
                struct pt_regs *regs, long err, int trap, int sig)
{
        struct die_args args = {
                .regs   = regs,
                .str    = str,
                .err    = err,
                .trapnr = trap,
                .signr  = sig,

        };
        return atomic_notifier_call_chain(&die_chain, val, &args);
}

int register_die_notifier(struct notifier_block *nb)
{
        vmalloc_sync_all();
        return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);