lockdep.h
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with a
 * single-depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
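
/*
 * Illustrative sketch (not part of this header; my_driver_lock_key and
 * struct my_dev are hypothetical): locks normally get a distinct class
 * per initialization site, but a static key can be supplied explicitly:
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &my_driver_lock_key);
 *	}
 *
 * All locks keyed by the same address share one lock-class, so a
 * dependency observed on any instance applies to all of them.
 */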

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
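
/*
 * Illustrative sketch (my_work_map and my_work_fn() are hypothetical): a
 * subsystem can declare a static pseudo-lock and wrap code regions with
 * lock_map_acquire()/lock_map_release() (defined later in this file) to
 * teach lockdep about ordering constraints that are not real locks -
 * the workqueue code uses this for work-item vs. flush dependencies:
 *
 *	static struct lock_class_key my_work_key;
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);
 *
 *	lock_map_acquire(&my_work_map);
 *	my_work_fn();
 *	lock_map_release(&my_work_map);
 */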

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
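
/*
 * Illustrative sketch of a class-split (struct my_node and my_leaf_key
 * are hypothetical): if all instances share one class but some have
 * genuinely different locking rules, re-key those instances right
 * after initialization:
 *
 *	static struct lock_class_key my_leaf_key;
 *
 *	static void my_node_init(struct my_node *node, bool leaf)
 *	{
 *		spin_lock_init(&node->lock);
 *		if (leaf)
 *			lockdep_set_class_and_name(&node->lock,
 *						   &my_leaf_key, "my_leaf");
 *	}
 *
 * Leaf locks now form their own class, so parent -> leaf nesting is no
 * longer reported as recursion on a single class.
 */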

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
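
/*
 * Illustrative sketch (struct my_lock and the do_raw_my_*() helpers are
 * hypothetical): a locking primitive reports itself to lockdep by
 * bracketing its acquire/release paths with these hooks. read=0 is an
 * exclusive acquire, check=2 requests full validation; the annotation
 * comes before the actual acquisition so a deadlock is reported rather
 * than hit:
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		do_raw_my_lock(l);
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		do_raw_my_unlock(l);
 *	}
 *
 * The spin_acquire()/spin_release() wrappers further down this file are
 * exactly this pattern.
 */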

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
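
/*
 * Illustrative sketch (my_dev/my_item are hypothetical):
 * lockdep_assert_held() turns a "caller must hold dev->lock" comment
 * into a checked assertion:
 *
 *	static void my_list_del(struct my_dev *dev, struct my_item *item)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		list_del(&item->node);
 *	}
 *
 * With lockdep enabled this WARNs when the lock is not held; in the
 * !LOCKDEP case below it compiles away entirely.
 */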

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#define lockdep_recursing(tsk)			(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
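
/*
 * A sketch of how the spinlock code uses this (cf.
 * include/linux/spinlock_api_smp.h): the failed trylock marks the start
 * of contention, the slow path blocks, and lock_acquired() stamps the
 * successful acquisition for /proc/lock_stat:
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */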

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
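
/*
 * Illustrative sketch (struct my_dev is hypothetical): when two locks
 * of the same class legitimately nest, annotate the inner acquire with
 * a subclass so lockdep does not report same-class recursion:
 *
 *	static void my_move(struct my_dev *src, struct my_dev *dst)
 *	{
 *		spin_lock(&src->lock);
 *		spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		spin_unlock(&dst->lock);
 *		spin_unlock(&src->lock);
 *	}
 *
 * The annotation only silences the report; the caller must still impose
 * a global order between src and dst (e.g. by address) to avoid a real
 * ABBA deadlock.
 */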

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_acquire_read(l)		do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
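
/*
 * Illustrative sketch (my_get_buf() and its fields are hypothetical):
 * might_lock() records the dependency on a lock that only a rare slow
 * path takes, so the graph is validated even when that path never runs
 * during testing:
 *
 *	static void *my_get_buf(struct my_dev *dev)
 *	{
 *		might_lock(&dev->alloc_mutex);
 *		if (dev->cached)
 *			return dev->cached;
 *		return my_alloc_slow(dev);
 *	}
 *
 * my_alloc_slow() is the rare path that actually takes dev->alloc_mutex.
 */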

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */