#define DISABLE_BRANCH_PROFILING
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/bitops.h>
#include <asm/sections.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

static int graph_lock(void)

static inline int graph_unlock(void)

static inline int debug_locks_off_graph_unlock(void)

static int lockdep_initialized;

static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/* ... */
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}
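/*
 * held_lock::class_idx is a 1-based index into lock_classes[]; the value 0
 * means "no class assigned yet", which is why hlock_class() subtracts one
 * (and bails out while the index is still zero).
 */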
#ifdef CONFIG_LOCK_STAT
static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			/* ... */
		}
		/* ... */
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
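/*
 * A struct lock_time accumulates min/max/total/nr samples of wait or hold
 * times; lock_time_add() folds one accumulator into another when the
 * per-cpu lockstat counters are summed into a single lock_class_stats.
 */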
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	/* ... */
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
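/*
 * Lock classes are hashed by their key into classhash_table; validated
 * sequences of held locks ("chains") are hashed by a 64-bit chain key into
 * chainhash_table.  iterate_chain_key() folds the next class index into the
 * running chain key as each lock is acquired.
 */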
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
static int class_filter(struct lock_class *class)
{
	/* ... */
	if (class->name_version == 1 &&
			/* ... */)
		return 1;
	if (class->name_version == 1 &&
			/* ... */)
		return 1;
	/* ... */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	/* ... */

	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	/* ... */
	if (!debug_locks_off_graph_unlock())
		return 0;

	printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
	printk("turning off the locking correctness validator.\n");
	/* ... */
}

#ifdef CONFIG_DEBUG_LOCKDEP
static int lockdep_init_error;
static const char *lock_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};
#endif

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	/* ... */
};

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);

static void __print_lock_name(struct lock_class *class)
{
	/* ... */
	if (class->name_version > 1)
		printk("#%d", class->name_version);
	/* ... */
}

static void print_lock_name(struct lock_class *class)
{
	/* ... */
	__print_lock_name(class);
	/* ... */
}

static void print_lockdep_cache(struct lockdep_map *lock)

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_ident(void)

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if ((addr >= start) && (addr < end))
		return 1;
	/* ... */
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		lock_init_error = lock->name;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		printk("BUG: looking up invalid subclass: %u\n", subclass);
		printk("turning off the locking correctness validator.\n");
		/* ... */
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			/* ... */
			return class;
		}
	}

	return NULL;
}
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		/* ... */
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);
	/* ... */

	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		/* ... */
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	/* ... */
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		/* ... */
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		/* ... */
	}

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/* ... */
	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		/* ... */
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;

	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/* ... */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
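/*
 * The circular_queue is the work queue for the breadth-first search of the
 * dependency graph.  Indices wrap with CQ_MASK; one slot is kept empty so
 * that "full" ((rear + 1) & CQ_MASK == front) stays distinguishable from
 * "empty" (front == rear).
 */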
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}
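/*
 * Visited nodes are marked by stamping the class with the current
 * lockdep_dependency_gen_id.  Since __cq_init() bumps the generation
 * counter before every search, no per-node "visited" flag ever has to be
 * cleared afterwards.
 */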
static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);
		/* ... */

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}
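/*
 * __bfs() walks the dependency graph starting at source_entry, following
 * locks_after when forward != 0 and locks_before otherwise.  match() is
 * applied to every reachable entry; the first hit is returned through
 * *target_entry.  max_bfs_queue_depth records the deepest queue occupancy
 * seen so far.
 */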
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	/* ... */
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	/* ... */
	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	/* ... */
	__print_lock_name(target);
	/* ... */
	__print_lock_name(parent);
	/* ... */
	__print_lock_name(target);
	/* ... */
	__print_lock_name(source);
	/* ... */
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("======================================================\n");
	printk("[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_ident();
	printk("-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}
static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	/* ... */
	ret = __lockdep_count_forward_deps(&this);
	/* ... */

	return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	/* ... */
	ret = __lockdep_count_backward_deps(&this);
	/* ... */

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	/* ... */
	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	/* ... */
	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	/* ... */
	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}
static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	/* ... */

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			/* ... */
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: ",depth,"");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @start to @end in reverse order:
 */
static void
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		/* ... */

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}

static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;
	/* ... */

	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(" --> ");
		__print_lock_name(middle_class);
		printk(" --> ");
		__print_lock_name(unsafe_class);
		printk("\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	/* ... */
	__print_lock_name(unsafe_class);
	/* ... */
	printk("                               local_irq_disable();\n");
	/* ... */
	__print_lock_name(safe_class);
	/* ... */
	__print_lock_name(middle_class);
	/* ... */
	printk("  <Interrupt>\n");
	/* ... */
	__print_lock_name(safe_class);
	/* ... */
	printk("\n *** DEADLOCK ***\n\n");
}
static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("======================================================\n");
	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_ident();
	printk("------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		/* ... */
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	printk("\n... which became %s-irq-safe at:\n", irqclass);
	/* ... */

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	/* ... */

	printk("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
	printk(" and the holding lock:\n");
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	printk("\nthe dependencies between the lock to be acquired");
	printk(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;
	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}
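/*
 * Usage bits are laid out as (state, direction, read): bit 0 selects the
 * _READ variant, bit 1 distinguishes USED_IN (taken inside that context)
 * from ENABLED (context enabled while the lock is held), and the remaining
 * bits select the state (HARDIRQ, SOFTIRQ, RECLAIM_FS).  exclusive_bit()
 * keeps the state, flips the direction and strips the read bit, yielding
 * the usage that conflicts with new_bit.
 */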
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * And the same for the -read variant:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	/* ... */
	__print_lock_name(prev);
	/* ... */
	__print_lock_name(next);
	/* ... */
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=============================================\n");
	printk("[ INFO: possible recursive locking detected ]\n");
	print_kernel_ident();
	printk("---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, int trylock_loop)
{
	struct lock_list *entry;
	int ret;
	struct lock_list this;
	struct lock_list *uninitialized_var(target_entry);
	static struct stack_trace trace;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph.
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret))
		return print_circular_bug(&this, target_entry, next, prev);
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we dont store read-triggered dependencies (only
	 * write-triggered dependencies).
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;

	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	if (!trylock_loop && !save_trace(&trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 2;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	int trylock_loop = 0;
	struct held_lock *hlock;

	/* ... */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next,
						distance, trylock_loop))
				return 0;
			/*
			 * Stop after the first non-trylock entry:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
		trylock_loop = 1;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;
	/* ... */
	return 0;
}
static struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 */
static inline int lookup_chain_cache(struct task_struct *curr,
				     struct held_lock *hlock,
				     u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	struct held_lock *hlock_curr, *hlock_next;
	int i, j;

	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			/* ... */
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
			(unsigned long long)chain_key, class->key, class->name);

	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			/* ... */
		}
	}
	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;

	/* Find the first held_lock of the current chain: */
	hlock_next = hlock;
	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock_next->irq_context)
			break;
		hlock_next = hlock_curr;
	}
	i++;
	chain->depth = curr->lockdep_depth + 1 - i;
	/* ... */
	chain->base = nr_chain_hlocks;
	nr_chain_hlocks += chain->depth;
	for (j = 0; j < chain->depth - 1; j++, i++) {
		int lock_id = curr->held_locks[i].class_idx - 1;
		chain_hlocks[chain->base + j] = lock_id;
	}
	chain_hlocks[chain->base + j] = class - lock_classes;
	list_add_tail_rcu(&chain->entry, hash_head);
	/* ... */

	return 1;
}
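/*
 * Each distinct sequence of held lock classes (per irq context) is hashed
 * by its chain key.  When the chain is already present in chainhash_table
 * the expensive graph checks have been done before for this exact sequence,
 * so validate_chain() can return early; otherwise the chain is recorded in
 * chain_hlocks[] and added to the hash.
 */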
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
		struct held_lock *hlock, int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 */
	if (!hlock->trylock && (hlock->check == 2) &&
	    lookup_chain_cache(curr, hlock, chain_key)) {
		int ret = check_deadlock(curr, hlock, lock, hlock->read);

		if (!ret)
			return 0;
		/* ... */
		if (!chain_head && ret != 2)
			if (!check_prevs_add(curr, hlock))
				return 0;
		/* ... */
	}
	/* ... */
	return 1;
}

static inline int validate_chain(struct task_struct *curr,
		struct lockdep_map *lock, struct held_lock *hlock,
		int chain_head, u64 chain_key)
{
	return 1;
}

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i, id;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}
		id = hlock->class_idx - 1;
		/* ... */
		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, id);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}
static void
print_usage_bug_scenario(struct held_lock *lock)
{
	struct lock_class *class = hlock_class(lock);

	printk(" Possible unsafe locking scenario:\n\n");
	/* ... */
	__print_lock_name(class);
	/* ... */
	printk("  <Interrupt>\n");
	/* ... */
	__print_lock_name(class);
	/* ... */
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=================================\n");
	printk("[ INFO: inconsistent lock state ]\n");
	print_kernel_ident();
	printk("---------------------------------\n");

	printk("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);

	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		/* ... */);
	print_lock(this);

	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
	/* ... */

	print_irqtrace_events(curr);
	printk("\nother info that might help us debug this:\n");
	print_usage_bug_scenario(this);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr,
			struct lock_list *root, struct lock_list *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	struct lock_list *entry = other;
	struct lock_list *middle = NULL;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=========================================================\n");
	printk("[ INFO: possible irq lock inversion dependency detected ]\n");
	print_kernel_ident();
	printk("---------------------------------------------------------\n");
	printk("%s/%d just changed the state of lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(this);
	if (forwards)
		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
	else
		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
	print_lock_name(other->class);
	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	printk("\nother info that might help us debug this:\n");

	/* Find a middle lock (if one exists): */
	depth = get_lock_depth(other);
	do {
		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}
		middle = entry;
		entry = get_lock_parent(entry);
		depth--;
	} while (entry && entry != root && (depth >= 0));
	if (forwards)
		print_irq_lock_scenario(root, other,
			middle ? middle->class : root->class, other->class);
	else
		print_irq_lock_scenario(other, root,
			middle ? middle->class : other->class, root->class);

	lockdep_print_held_locks(curr);

	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
	if (!save_trace(&root->trace))
		return 0;
	print_shortest_lock_dependencies(other, root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_forwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_backwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
	print_ip_sym(curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
	print_ip_sym(curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
	print_ip_sym(curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
	print_ip_sym(curr->softirq_disable_ip);
}

static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int RECLAIM_FS_verbose(struct lock_class *class)
{
#if RECLAIM_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & 1;
	int dir = new_bit & 2;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
	 */
	if ((!read || !dir || STRICT_READ_CHECKS) &&
			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
		return 0;

	/*
	 * Check for read in write conflicts
	 */
	if (!read) {
		if (!valid_state(curr, this, new_bit, excl_bit + 1))
			return 0;

		if (STRICT_READ_CHECKS &&
			!usage(curr, this, excl_bit + 1,
				state_name(new_bit + 1)))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}

enum mark_type {
#define LOCKDEP_STATE(__STATE)	__STATE,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
	enum lock_usage_bit usage_bit;
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		usage_bit = 2 + (mark << 2); /* ENABLED */
		if (hlock->read)
			usage_bit += 1; /* READ */

		BUG_ON(usage_bit >= LOCK_USAGE_STATES);

		/* ... */

		if (!mark_lock(curr, hlock, usage_bit))
			return 0;
	}

	return 1;
}
static void __trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;

	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks too:
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	/* ... */
}

void trace_hardirqs_on_caller(unsigned long ip)
{
	/* ... */
	current->lockdep_recursion = 1;
	__trace_hardirqs_on_caller(ip);
	current->lockdep_recursion = 0;
}

void trace_hardirqs_off_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	/* ... */
	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		/* ... */
	}
	/* ... */
}

void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	/* ... */
	if (curr->softirqs_enabled) {
		/* ... */
		return;
	}

	current->lockdep_recursion = 1;
	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	/*
	 * If hardirqs are enabled too, set the usage bit
	 * for all held locks:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, SOFTIRQ);
	current->lockdep_recursion = 0;
}

void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	/* ... */
	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		/* ... */
	}
}

static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
{
	struct task_struct *curr = current;

	/* ... */
	mark_held_locks(curr, RECLAIM_FS);
}

static void check_flags(unsigned long flags);

void lockdep_trace_alloc(gfp_t gfp_mask)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	__lockdep_trace_alloc(gfp_mask, flags);
	current->lockdep_recursion = 0;
	/* ... */
}
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

	/*
	 * The irq-context machinery is also reused to check GFP_FS
	 * recursion during memory reclaim:
	 */
	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
				return 0;
		} else {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
				return 0;
		}
	}

	return 1;
}

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}
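/*
 * hlock->irq_context packs the hardirq and softirq context into two bits
 * (2 * hardirq_context + softirq_context).  A lock acquired in a different
 * irq context than the previously held lock starts a new, separate chain,
 * so dependencies never connect across context boundaries.
 */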
#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
#define LOCKDEP_STATE(__STATE)			\
	case LOCK_USED_IN_##__STATE:		\
	case LOCK_USED_IN_##__STATE##_READ:	\
	case LOCK_ENABLED_##__STATE:		\
	case LOCK_ENABLED_##__STATE##_READ:
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	/* ... */
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	int i;

	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		lock->class_cache[i] = NULL;

#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif

	if (DEBUG_LOCKS_WARN_ON(!name)) {
		lock->name = "NULL";
		return;
	}

	lock->name = name;

	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
	if (!static_obj(key)) {
		printk("BUG: key %p not in .data!\n", key);
		/* ... */
		return;
	}
	lock->key = key;

	/* ... */
	if (subclass)
		register_lock_class(lock, subclass, 1);
}

static int
print_lock_nested_lock_not_held(struct task_struct *curr,
				struct held_lock *hlock,
				unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("==================================\n");
	printk("[ BUG: Nested lock was not taken ]\n");
	print_kernel_ident();
	printk("----------------------------------\n");

	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
	print_lock(hlock);

	printk("\nbut this task is not holding:\n");
	printk("%s\n", hlock->nest_lock->name);

	printk("\nstack backtrace:\n");
	dump_stack();

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}
static int __lock_is_held(struct lockdep_map *lock);

/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip,
			  int references)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth, id;
	int chain_head = 0;
	int class_idx;
	u64 chain_key;

	/* ... */

	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		class = lock->class_cache[subclass];
	/*
	 * Not cached?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}
	/* ... */
	if (very_verbose(class)) {
		printk("\nacquire class [%p] %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we dont increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	/* ... */

	class_idx = class - lock_classes + 1;

	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			if (hlock->references)
				hlock->references++;
			else
				hlock->references = 2;

			return 1;
		}
	}

	hlock = curr->held_locks + depth;
	/* ... */
	hlock->class_idx = class_idx;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
	hlock->references = references;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = lockstat_clock();
#endif

	if (check == 2 && !mark_irqflags(curr, hlock))
		return 0;

	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 */
	id = class - lock_classes;
	/* ... */

	chain_key = curr->curr_chain_key;
	if (!depth) {
		/* ... */
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = 0;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, id);

	if (nest_lock && !__lock_is_held(nest_lock))
		return print_lock_nested_lock_not_held(curr, hlock, ip);

	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		printk("BUG: MAX_LOCK_DEPTH too low!\n");
		printk("turning off the locking correctness validator.\n");
		/* ... */
		return 0;
	}

	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}
static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("=====================================\n");
	printk("[ BUG: bad unlock balance detected! ]\n");
	print_kernel_ident();
	printk("-------------------------------------\n");
	printk("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no more locks to release!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Common debugging checks for both nested and non-nested unlock:
 */
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
			unsigned long ip)
{
	/* ... */
	if (curr->lockdep_depth <= 0)
		return print_unlock_inbalance_bug(curr, lock, ip);

	return 1;
}

static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
{
	if (hlock->instance == lock)
		return 1;

	if (hlock->references) {
		struct lock_class *class = lock->class_cache[0];

		if (!class)
			class = look_up_lock_class(lock, 0);

		/* ... */
		if (!class)
			return 0;

		if (hlock->class_idx == class - lock_classes + 1)
			return 1;
	}

	return 0;
}
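/*
 * A held lock is matched either by its exact lockdep_map instance or, for
 * reference-counted held locks (hlock->references), by the lock's class:
 * the class is taken from the subclass-0 cache (or looked up) and compared
 * against the 1-based class_idx stored in the held_lock.
 */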
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	depth = curr->lockdep_depth;
	/* ... */

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (match_held_lock(hlock, lock))
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	lockdep_init_map(lock, name, key, 0);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes + 1;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip,
				hlock->references))
			return 0;
	}

	/* ... */
	return 1;
}

/*
 * Remove the lock from the list of currently held locks in a
 * potentially non-nested (out of order) manner:
 */
static int
lock_release_non_nested(struct task_struct *curr,
			struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock, *prev_hlock;
	unsigned int depth;
	int i;

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	depth = curr->lockdep_depth;
	/* ... */

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (match_held_lock(hlock, lock))
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	if (hlock->instance == lock)
		lock_release_holdtime(hlock);

	if (hlock->references) {
		hlock->references--;
		if (hlock->references) {
			/*
			 * We still have references on this lock, the
			 * current lock stack is still valid:
			 */
			return 1;
		}
	}

	/*
	 * Remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */
	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (i++; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip,
				hlock->references))
			return 0;
	}

	/* ... */
	return 1;
}
/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() for unlocks that nest
 * perfectly (i.e. the current top of the lock-stack is unlocked):
 */
static int lock_release_nested(struct task_struct *curr,
			       struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock;
	unsigned int depth;

	/*
	 * Pop off the top of the lock stack:
	 */
	depth = curr->lockdep_depth - 1;
	hlock = curr->held_locks + depth;

	/*
	 * Is the unlock non-nested:
	 */
	if (hlock->instance != lock || hlock->references)
		return lock_release_non_nested(curr, lock, ip);
	curr->lockdep_depth--;

	/* ... */
	curr->curr_chain_key = hlock->prev_chain_key;

	lock_release_holdtime(hlock);

#ifdef CONFIG_DEBUG_LOCKDEP
	hlock->prev_chain_key = 0;
	hlock->class_idx = 0;
	hlock->acquire_ip = 0;
	hlock->irq_context = 0;
#endif
	return 1;
}

static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;

	if (!check_unlock(curr, lock, ip))
		return;

	if (nested) {
		if (!lock_release_nested(curr, lock, ip))
			return;
	} else {
		if (!lock_release_non_nested(curr, lock, ip))
			return;
	}

	check_chain_key(curr);
}

static int __lock_is_held(struct lockdep_map *lock)
{
	struct task_struct *curr = current;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock))
			return 1;
	}

	return 0;
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/* ... */

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}

void lock_set_class(struct lockdep_map *lock, const char *name,
		    struct lock_class_key *key, unsigned int subclass,
		    unsigned long ip)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_set_class(lock, name, key, subclass, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	/* ... */
}

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0);
	current->lockdep_recursion = 0;
	/* ... */
}

void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	trace_lock_release(lock, ip);
	__lock_release(lock, nested, ip);
	current->lockdep_recursion = 0;
	/* ... */
}

int lock_is_held(struct lockdep_map *lock)
{
	unsigned long flags;
	int ret = 0;

	/* ... */
	current->lockdep_recursion = 1;
	ret = __lock_is_held(lock);
	current->lockdep_recursion = 0;
	/* ... */

	return ret;
}

void lockdep_clear_current_reclaim_state(void)
{
	current->lockdep_reclaim_gfp = 0;
}
#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("=================================\n");
	printk("[ BUG: bad contention detected! ]\n");
	print_kernel_ident();
	printk("---------------------------------\n");
	printk("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no locks held!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	/* ... */

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (match_held_lock(hlock, lock))
			goto found_it;
		prev_hlock = hlock;
	}
	print_lock_contention_bug(curr, lock, ip);
	return;

found_it:
	if (hlock->instance != lock)
		return;

	hlock->waittime_stamp = lockstat_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	/* ... */
	stats->bounces[bounce_contended + !!hlock->read]++;
	put_lock_stats(stats);
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now, waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	/* ... */

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (match_held_lock(hlock, lock))
			goto found_it;
		prev_hlock = hlock;
	}
	print_lock_contention_bug(curr, lock, _RET_IP_);
	return;

found_it:
	if (hlock->instance != lock)
		return;

	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	trace_lock_acquired(lock, ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;
	put_lock_stats(stats);

	lock->cpu = cpu;
	lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	/* ... */
}

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	/* ... */
	current->lockdep_recursion = 1;
	__lock_acquired(lock, ip);
	current->lockdep_recursion = 0;
	/* ... */
}
#endif
/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */
void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	/* ... */
	current->lockdep_recursion = 0;
	/* ... */
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	/* ... */
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);
	/* ... */
}

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	list_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);
	/* ... */
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return	addr >= start && addr < start + size;
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i;
	int locked;

	/* ... */
	locked = graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
				zap_class(class);
		}
	}

	if (locked)
		graph_unlock();
	/* ... */
}

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i, j;
	int locked;

	/* ... */

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			int match = 0;

			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				match |= class == lock->class_cache[j];

			if (unlikely(match)) {
				if (debug_locks_off_graph_unlock()) {
					/* ... */
					WARN_ON(1);
				}
				goto out_restore;
			}
		}
	}
	if (locked)
		graph_unlock();

out_restore:
	/* ... */
	return;
}

void lockdep_init(void)
{
	int i;

	/*
	 * Some architectures call lockdep_init() themselves before
	 * start_kernel() does; initialize the hashes only once:
	 */
	if (lockdep_initialized)
		return;

	for (i = 0; i < CLASSHASH_SIZE; i++)
		INIT_LIST_HEAD(classhash_table + i);

	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);

	lockdep_initialized = 1;
}

void __init lockdep_info(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	/* ... */

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		/* ... */
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(struct circular_queue)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(((struct task_struct *)NULL)->held_locks));

#ifdef CONFIG_DEBUG_LOCKDEP
	if (lockdep_init_error) {
		printk("WARNING: lockdep init error! lock-%s was acquired"
			"before lockdep_init\n", lock_init_error);
		printk("Call stack leading to lockdep invocation was:\n");
		print_stack_trace(&lockdep_init_trace, 0);
	}
#endif
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n");
	printk("=========================\n");
	printk("[ BUG: held lock freed! ]\n");
	print_kernel_ident();
	printk("-------------------------\n");
	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	/* ... */
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
					sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	/* ... */
}

static void print_held_locks_bug(struct task_struct *curr)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n");
	printk("=====================================\n");
	printk("[ BUG: lock held at task exit time! ]\n");
	print_kernel_ident();
	printk("-------------------------------------\n");
	printk("%s/%d is exiting with locks still held!\n",
		curr->comm, task_pid_nr(curr));
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(struct task_struct *task)
{
	if (unlikely(task->lockdep_depth > 0))
		print_held_locks_bug(task);
}

void debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	printk("\nShowing all locks held in the system:\n");

retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			/* ... */
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	}
	/* ... */

	do_each_thread(g, p) {
		/* ... */
		if (p->lockdep_depth)
			lockdep_print_held_locks(p);
		/* ... */
	} while_each_thread(g, p);

	printk("\n");
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}

void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}

void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		printk("\n");
		printk("================================================\n");
		printk("[ BUG: lock held when returning to user space! ]\n");
		print_kernel_ident();
		printk("------------------------------------------------\n");
		printk("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}
}

void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

#ifndef CONFIG_PROVE_RCU_REPEATEDLY
	if (!debug_locks_off())
		return;
#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
	printk("\n");
	printk("===============================\n");
	printk("[ INFO: suspicious RCU usage. ]\n");
	print_kernel_ident();
	printk("-------------------------------\n");
	printk("%s:%d %s!\n", file, line, s);
	printk("\nother info that might help us debug this:\n\n");
	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: rcu_is_cpu_idle()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * A CPU inside the RCU-free idle window is in an "extended
	 * quiescent state", so RCU read-side critical sections are
	 * ignored there:
	 */
	if (rcu_is_cpu_idle())
		printk("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	printk("\nstack backtrace:\n");
	dump_stack();
}