/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
14 #include <linux/module.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/kallsyms.h>
18 #include <linux/debug_locks.h>
19 #include <linux/vmalloc.h>
20 #include <linux/sort.h>
21 #include <asm/uaccess.h>
22 #include <asm/div64.h>
24 #include "lockdep_internals.h"
26 static void *l_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
28 return seq_list_next(v
, &all_lock_classes
, pos
);
31 static void *l_start(struct seq_file
*m
, loff_t
*pos
)
33 return seq_list_start_head(&all_lock_classes
, *pos
);
/* seq_file .stop op: nothing to release for the lock-class iterator. */
static void l_stop(struct seq_file *m, void *v)
{
}
40 static void print_name(struct seq_file
*m
, struct lock_class
*class)
43 const char *name
= class->name
;
46 name
= __get_key_name(class->key
, str
);
47 seq_printf(m
, "%s", name
);
49 seq_printf(m
, "%s", name
);
50 if (class->name_version
> 1)
51 seq_printf(m
, "#%d", class->name_version
);
53 seq_printf(m
, "/%d", class->subclass
);
57 static int l_show(struct seq_file
*m
, void *v
)
59 struct lock_class
*class = list_entry(v
, struct lock_class
, lock_entry
);
60 struct lock_list
*entry
;
61 char usage
[LOCK_USAGE_CHARS
];
63 if (v
== &all_lock_classes
) {
64 seq_printf(m
, "all lock classes:\n");
68 seq_printf(m
, "%p", class->key
);
69 #ifdef CONFIG_DEBUG_LOCKDEP
70 seq_printf(m
, " OPS:%8ld", class->ops
);
72 #ifdef CONFIG_PROVE_LOCKING
73 seq_printf(m
, " FD:%5ld", lockdep_count_forward_deps(class));
74 seq_printf(m
, " BD:%5ld", lockdep_count_backward_deps(class));
77 get_usage_chars(class, usage
);
78 seq_printf(m
, " %s", usage
);
84 list_for_each_entry(entry
, &class->locks_after
, entry
) {
85 if (entry
->distance
== 1) {
86 seq_printf(m
, " -> [%p] ", entry
->class->key
);
87 print_name(m
, entry
->class);
96 static const struct seq_operations lockdep_ops
= {
103 static int lockdep_open(struct inode
*inode
, struct file
*file
)
105 return seq_open(file
, &lockdep_ops
);
108 static const struct file_operations proc_lockdep_operations
= {
109 .open
= lockdep_open
,
112 .release
= seq_release
,
115 #ifdef CONFIG_PROVE_LOCKING
116 static void *lc_start(struct seq_file
*m
, loff_t
*pos
)
119 return SEQ_START_TOKEN
;
121 if (*pos
- 1 < nr_lock_chains
)
122 return lock_chains
+ (*pos
- 1);
127 static void *lc_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
130 return lc_start(m
, pos
);
/* seq_file .stop op: nothing to release for the chain iterator. */
static void lc_stop(struct seq_file *m, void *v)
{
}
137 static int lc_show(struct seq_file
*m
, void *v
)
139 struct lock_chain
*chain
= v
;
140 struct lock_class
*class;
143 if (v
== SEQ_START_TOKEN
) {
144 seq_printf(m
, "all lock chains:\n");
148 seq_printf(m
, "irq_context: %d\n", chain
->irq_context
);
150 for (i
= 0; i
< chain
->depth
; i
++) {
151 class = lock_chain_get_class(chain
, i
);
155 seq_printf(m
, "[%p] ", class->key
);
156 print_name(m
, class);
164 static const struct seq_operations lockdep_chains_ops
= {
171 static int lockdep_chains_open(struct inode
*inode
, struct file
*file
)
173 return seq_open(file
, &lockdep_chains_ops
);
176 static const struct file_operations proc_lockdep_chains_operations
= {
177 .open
= lockdep_chains_open
,
180 .release
= seq_release
,
182 #endif /* CONFIG_PROVE_LOCKING */
/*
 * Dump the internal lockdep debug counters (irq on/off event counts,
 * chain lookup and dependency-check statistics).  Compiles to an empty
 * function unless CONFIG_DEBUG_LOCKDEP is set.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
		     hi2 = debug_atomic_read(&hardirqs_off_events),
		     hr1 = debug_atomic_read(&redundant_hardirqs_on),
		     hr2 = debug_atomic_read(&redundant_hardirqs_off),
		     si1 = debug_atomic_read(&softirqs_on_events),
		     si2 = debug_atomic_read(&softirqs_off_events),
		     sr1 = debug_atomic_read(&redundant_softirqs_on),
		     sr2 = debug_atomic_read(&redundant_softirqs_off);

	seq_printf(m, " chain lookup misses: %11u\n",
		debug_atomic_read(&chain_lookup_misses));
	seq_printf(m, " chain lookup hits: %11u\n",
		debug_atomic_read(&chain_lookup_hits));
	seq_printf(m, " cyclic checks: %11u\n",
		debug_atomic_read(&nr_cyclic_checks));
	seq_printf(m, " find-mask forwards checks: %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks: %11u\n",
		debug_atomic_read(&nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events: %11u\n", hi1);
	seq_printf(m, " hardirq off events: %11u\n", hi2);
	seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
	seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
	seq_printf(m, " softirq on events: %11u\n", si1);
	seq_printf(m, " softirq off events: %11u\n", si2);
	seq_printf(m, " redundant softirq ons: %11u\n", sr1);
	seq_printf(m, " redundant softirq offs: %11u\n", sr2);
#endif
}
218 static int lockdep_stats_show(struct seq_file
*m
, void *v
)
220 struct lock_class
*class;
221 unsigned long nr_unused
= 0, nr_uncategorized
= 0,
222 nr_irq_safe
= 0, nr_irq_unsafe
= 0,
223 nr_softirq_safe
= 0, nr_softirq_unsafe
= 0,
224 nr_hardirq_safe
= 0, nr_hardirq_unsafe
= 0,
225 nr_irq_read_safe
= 0, nr_irq_read_unsafe
= 0,
226 nr_softirq_read_safe
= 0, nr_softirq_read_unsafe
= 0,
227 nr_hardirq_read_safe
= 0, nr_hardirq_read_unsafe
= 0,
228 sum_forward_deps
= 0, factor
= 0;
230 list_for_each_entry(class, &all_lock_classes
, lock_entry
) {
232 if (class->usage_mask
== 0)
234 if (class->usage_mask
== LOCKF_USED
)
236 if (class->usage_mask
& LOCKF_USED_IN_IRQ
)
238 if (class->usage_mask
& LOCKF_ENABLED_IRQ
)
240 if (class->usage_mask
& LOCKF_USED_IN_SOFTIRQ
)
242 if (class->usage_mask
& LOCKF_ENABLED_SOFTIRQ
)
244 if (class->usage_mask
& LOCKF_USED_IN_HARDIRQ
)
246 if (class->usage_mask
& LOCKF_ENABLED_HARDIRQ
)
248 if (class->usage_mask
& LOCKF_USED_IN_IRQ_READ
)
250 if (class->usage_mask
& LOCKF_ENABLED_IRQ_READ
)
251 nr_irq_read_unsafe
++;
252 if (class->usage_mask
& LOCKF_USED_IN_SOFTIRQ_READ
)
253 nr_softirq_read_safe
++;
254 if (class->usage_mask
& LOCKF_ENABLED_SOFTIRQ_READ
)
255 nr_softirq_read_unsafe
++;
256 if (class->usage_mask
& LOCKF_USED_IN_HARDIRQ_READ
)
257 nr_hardirq_read_safe
++;
258 if (class->usage_mask
& LOCKF_ENABLED_HARDIRQ_READ
)
259 nr_hardirq_read_unsafe
++;
261 #ifdef CONFIG_PROVE_LOCKING
262 sum_forward_deps
+= lockdep_count_forward_deps(class);
265 #ifdef CONFIG_DEBUG_LOCKDEP
266 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks
) != nr_unused
);
268 seq_printf(m
, " lock-classes: %11lu [max: %lu]\n",
269 nr_lock_classes
, MAX_LOCKDEP_KEYS
);
270 seq_printf(m
, " direct dependencies: %11lu [max: %lu]\n",
271 nr_list_entries
, MAX_LOCKDEP_ENTRIES
);
272 seq_printf(m
, " indirect dependencies: %11lu\n",
276 * Total number of dependencies:
278 * All irq-safe locks may nest inside irq-unsafe locks,
279 * plus all the other known dependencies:
281 seq_printf(m
, " all direct dependencies: %11lu\n",
282 nr_irq_unsafe
* nr_irq_safe
+
283 nr_hardirq_unsafe
* nr_hardirq_safe
+
287 * Estimated factor between direct and indirect
291 factor
= sum_forward_deps
/ nr_list_entries
;
293 #ifdef CONFIG_PROVE_LOCKING
294 seq_printf(m
, " dependency chains: %11lu [max: %lu]\n",
295 nr_lock_chains
, MAX_LOCKDEP_CHAINS
);
296 seq_printf(m
, " dependency chain hlocks: %11d [max: %lu]\n",
297 nr_chain_hlocks
, MAX_LOCKDEP_CHAIN_HLOCKS
);
300 #ifdef CONFIG_TRACE_IRQFLAGS
301 seq_printf(m
, " in-hardirq chains: %11u\n",
303 seq_printf(m
, " in-softirq chains: %11u\n",
306 seq_printf(m
, " in-process chains: %11u\n",
308 seq_printf(m
, " stack-trace entries: %11lu [max: %lu]\n",
309 nr_stack_trace_entries
, MAX_STACK_TRACE_ENTRIES
);
310 seq_printf(m
, " combined max dependencies: %11u\n",
311 (nr_hardirq_chains
+ 1) *
312 (nr_softirq_chains
+ 1) *
313 (nr_process_chains
+ 1)
315 seq_printf(m
, " hardirq-safe locks: %11lu\n",
317 seq_printf(m
, " hardirq-unsafe locks: %11lu\n",
319 seq_printf(m
, " softirq-safe locks: %11lu\n",
321 seq_printf(m
, " softirq-unsafe locks: %11lu\n",
323 seq_printf(m
, " irq-safe locks: %11lu\n",
325 seq_printf(m
, " irq-unsafe locks: %11lu\n",
328 seq_printf(m
, " hardirq-read-safe locks: %11lu\n",
329 nr_hardirq_read_safe
);
330 seq_printf(m
, " hardirq-read-unsafe locks: %11lu\n",
331 nr_hardirq_read_unsafe
);
332 seq_printf(m
, " softirq-read-safe locks: %11lu\n",
333 nr_softirq_read_safe
);
334 seq_printf(m
, " softirq-read-unsafe locks: %11lu\n",
335 nr_softirq_read_unsafe
);
336 seq_printf(m
, " irq-read-safe locks: %11lu\n",
338 seq_printf(m
, " irq-read-unsafe locks: %11lu\n",
341 seq_printf(m
, " uncategorized locks: %11lu\n",
343 seq_printf(m
, " unused locks: %11lu\n",
345 seq_printf(m
, " max locking depth: %11u\n",
347 #ifdef CONFIG_PROVE_LOCKING
348 seq_printf(m
, " max bfs queue depth: %11u\n",
349 max_bfs_queue_depth
);
351 lockdep_stats_debug_show(m
);
352 seq_printf(m
, " debug_locks: %11u\n",
358 static int lockdep_stats_open(struct inode
*inode
, struct file
*file
)
360 return single_open(file
, lockdep_stats_show
, NULL
);
363 static const struct file_operations proc_lockdep_stats_operations
= {
364 .open
= lockdep_stats_open
,
367 .release
= single_release
,
370 #ifdef CONFIG_LOCK_STAT
372 struct lock_stat_data
{
373 struct lock_class
*class;
374 struct lock_class_stats stats
;
377 struct lock_stat_seq
{
378 struct lock_stat_data
*iter_end
;
379 struct lock_stat_data stats
[MAX_LOCKDEP_KEYS
];
383 * sort on absolute number of contentions
385 static int lock_stat_cmp(const void *l
, const void *r
)
387 const struct lock_stat_data
*dl
= l
, *dr
= r
;
388 unsigned long nl
, nr
;
390 nl
= dl
->stats
.read_waittime
.nr
+ dl
->stats
.write_waittime
.nr
;
391 nr
= dr
->stats
.read_waittime
.nr
+ dr
->stats
.write_waittime
.nr
;
/* Emit @offset spaces followed by @length copies of @c. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_puts(m, " ");
	for (i = 0; i < length; i++)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}
407 static void snprint_time(char *buf
, size_t bufsiz
, s64 nr
)
412 nr
+= 5; /* for display rounding */
413 div
= div_s64_rem(nr
, 1000, &rem
);
414 snprintf(buf
, bufsiz
, "%lld.%02d", (long long)div
, (int)rem
/10);
417 static void seq_time(struct seq_file
*m
, s64 time
)
421 snprint_time(num
, sizeof(num
), time
);
422 seq_printf(m
, " %14s", num
);
425 static void seq_lock_time(struct seq_file
*m
, struct lock_time
*lt
)
427 seq_printf(m
, "%14lu", lt
->nr
);
428 seq_time(m
, lt
->min
);
429 seq_time(m
, lt
->max
);
430 seq_time(m
, lt
->total
);
433 static void seq_stats(struct seq_file
*m
, struct lock_stat_data
*data
)
436 struct lock_class
*class;
437 struct lock_class_stats
*stats
;
441 stats
= &data
->stats
;
444 if (class->name_version
> 1)
445 namelen
-= 2; /* XXX truncates versions > 9 */
450 char str
[KSYM_NAME_LEN
];
451 const char *key_name
;
453 key_name
= __get_key_name(class->key
, str
);
454 snprintf(name
, namelen
, "%s", key_name
);
456 snprintf(name
, namelen
, "%s", class->name
);
458 namelen
= strlen(name
);
459 if (class->name_version
> 1) {
460 snprintf(name
+namelen
, 3, "#%d", class->name_version
);
463 if (class->subclass
) {
464 snprintf(name
+namelen
, 3, "/%d", class->subclass
);
468 if (stats
->write_holdtime
.nr
) {
469 if (stats
->read_holdtime
.nr
)
470 seq_printf(m
, "%38s-W:", name
);
472 seq_printf(m
, "%40s:", name
);
474 seq_printf(m
, "%14lu ", stats
->bounces
[bounce_contended_write
]);
475 seq_lock_time(m
, &stats
->write_waittime
);
476 seq_printf(m
, " %14lu ", stats
->bounces
[bounce_acquired_write
]);
477 seq_lock_time(m
, &stats
->write_holdtime
);
481 if (stats
->read_holdtime
.nr
) {
482 seq_printf(m
, "%38s-R:", name
);
483 seq_printf(m
, "%14lu ", stats
->bounces
[bounce_contended_read
]);
484 seq_lock_time(m
, &stats
->read_waittime
);
485 seq_printf(m
, " %14lu ", stats
->bounces
[bounce_acquired_read
]);
486 seq_lock_time(m
, &stats
->read_holdtime
);
490 if (stats
->read_waittime
.nr
+ stats
->write_waittime
.nr
== 0)
493 if (stats
->read_holdtime
.nr
)
496 for (i
= 0; i
< LOCKSTAT_POINTS
; i
++) {
497 char sym
[KSYM_SYMBOL_LEN
];
500 if (class->contention_point
[i
] == 0)
504 seq_line(m
, '-', 40-namelen
, namelen
);
506 sprint_symbol(sym
, class->contention_point
[i
]);
507 snprintf(ip
, sizeof(ip
), "[<%p>]",
508 (void *)class->contention_point
[i
]);
509 seq_printf(m
, "%40s %14lu %29s %s\n", name
,
510 stats
->contention_point
[i
],
513 for (i
= 0; i
< LOCKSTAT_POINTS
; i
++) {
514 char sym
[KSYM_SYMBOL_LEN
];
517 if (class->contending_point
[i
] == 0)
521 seq_line(m
, '-', 40-namelen
, namelen
);
523 sprint_symbol(sym
, class->contending_point
[i
]);
524 snprintf(ip
, sizeof(ip
), "[<%p>]",
525 (void *)class->contending_point
[i
]);
526 seq_printf(m
, "%40s %14lu %29s %s\n", name
,
527 stats
->contending_point
[i
],
532 seq_line(m
, '.', 0, 40 + 1 + 10 * (14 + 1));
537 static void seq_header(struct seq_file
*m
)
539 seq_printf(m
, "lock_stat version 0.3\n");
541 if (unlikely(!debug_locks
))
542 seq_printf(m
, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
544 seq_line(m
, '-', 0, 40 + 1 + 10 * (14 + 1));
545 seq_printf(m
, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
558 seq_line(m
, '-', 0, 40 + 1 + 10 * (14 + 1));
562 static void *ls_start(struct seq_file
*m
, loff_t
*pos
)
564 struct lock_stat_seq
*data
= m
->private;
565 struct lock_stat_data
*iter
;
568 return SEQ_START_TOKEN
;
570 iter
= data
->stats
+ (*pos
- 1);
571 if (iter
>= data
->iter_end
)
577 static void *ls_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
580 return ls_start(m
, pos
);
/* seq_file .stop op: nothing to release per-iteration. */
static void ls_stop(struct seq_file *m, void *v)
{
}
587 static int ls_show(struct seq_file
*m
, void *v
)
589 if (v
== SEQ_START_TOKEN
)
597 static struct seq_operations lockstat_ops
= {
604 static int lock_stat_open(struct inode
*inode
, struct file
*file
)
607 struct lock_class
*class;
608 struct lock_stat_seq
*data
= vmalloc(sizeof(struct lock_stat_seq
));
613 res
= seq_open(file
, &lockstat_ops
);
615 struct lock_stat_data
*iter
= data
->stats
;
616 struct seq_file
*m
= file
->private_data
;
618 list_for_each_entry(class, &all_lock_classes
, lock_entry
) {
620 iter
->stats
= lock_stats(class);
623 data
->iter_end
= iter
;
625 sort(data
->stats
, data
->iter_end
- data
->stats
,
626 sizeof(struct lock_stat_data
),
627 lock_stat_cmp
, NULL
);
636 static ssize_t
lock_stat_write(struct file
*file
, const char __user
*buf
,
637 size_t count
, loff_t
*ppos
)
639 struct lock_class
*class;
643 if (get_user(c
, buf
))
649 list_for_each_entry(class, &all_lock_classes
, lock_entry
)
650 clear_lock_stats(class);
655 static int lock_stat_release(struct inode
*inode
, struct file
*file
)
657 struct seq_file
*seq
= file
->private_data
;
660 return seq_release(inode
, file
);
663 static const struct file_operations proc_lock_stat_operations
= {
664 .open
= lock_stat_open
,
665 .write
= lock_stat_write
,
668 .release
= lock_stat_release
,
670 #endif /* CONFIG_LOCK_STAT */
672 static int __init
lockdep_proc_init(void)
674 proc_create("lockdep", S_IRUSR
, NULL
, &proc_lockdep_operations
);
675 #ifdef CONFIG_PROVE_LOCKING
676 proc_create("lockdep_chains", S_IRUSR
, NULL
,
677 &proc_lockdep_chains_operations
);
679 proc_create("lockdep_stats", S_IRUSR
, NULL
,
680 &proc_lockdep_stats_operations
);
682 #ifdef CONFIG_LOCK_STAT
683 proc_create("lock_stat", S_IRUSR
| S_IWUSR
, NULL
,
684 &proc_lock_stat_operations
);
690 __initcall(lockdep_proc_init
);