ACPI: thinkpad-acpi: preserve radio state across shutdown
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / lockdep_proc.c
blob20dbcbf9c7dd2cf34486f3e9f307f1e3cf8a97bc
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
14 #include <linux/module.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/kallsyms.h>
18 #include <linux/debug_locks.h>
19 #include <linux/vmalloc.h>
20 #include <linux/sort.h>
21 #include <asm/uaccess.h>
22 #include <asm/div64.h>
24 #include "lockdep_internals.h"
26 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
28 struct lock_class *class;
30 (*pos)++;
32 if (v == SEQ_START_TOKEN)
33 class = m->private;
34 else {
35 class = v;
37 if (class->lock_entry.next != &all_lock_classes)
38 class = list_entry(class->lock_entry.next,
39 struct lock_class, lock_entry);
40 else
41 class = NULL;
44 return class;
47 static void *l_start(struct seq_file *m, loff_t *pos)
49 struct lock_class *class;
50 loff_t i = 0;
52 if (*pos == 0)
53 return SEQ_START_TOKEN;
55 list_for_each_entry(class, &all_lock_classes, lock_entry) {
56 if (++i == *pos)
57 return class;
59 return NULL;
62 static void l_stop(struct seq_file *m, void *v)
66 static void print_name(struct seq_file *m, struct lock_class *class)
68 char str[128];
69 const char *name = class->name;
71 if (!name) {
72 name = __get_key_name(class->key, str);
73 seq_printf(m, "%s", name);
74 } else{
75 seq_printf(m, "%s", name);
76 if (class->name_version > 1)
77 seq_printf(m, "#%d", class->name_version);
78 if (class->subclass)
79 seq_printf(m, "/%d", class->subclass);
83 static int l_show(struct seq_file *m, void *v)
85 struct lock_class *class = v;
86 struct lock_list *entry;
87 char c1, c2, c3, c4;
89 if (v == SEQ_START_TOKEN) {
90 seq_printf(m, "all lock classes:\n");
91 return 0;
94 seq_printf(m, "%p", class->key);
95 #ifdef CONFIG_DEBUG_LOCKDEP
96 seq_printf(m, " OPS:%8ld", class->ops);
97 #endif
98 #ifdef CONFIG_PROVE_LOCKING
99 seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
101 #endif
103 get_usage_chars(class, &c1, &c2, &c3, &c4);
104 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
106 seq_printf(m, ": ");
107 print_name(m, class);
108 seq_puts(m, "\n");
110 list_for_each_entry(entry, &class->locks_after, entry) {
111 if (entry->distance == 1) {
112 seq_printf(m, " -> [%p] ", entry->class->key);
113 print_name(m, entry->class);
114 seq_puts(m, "\n");
117 seq_puts(m, "\n");
119 return 0;
/* seq_file iterator callbacks for /proc/lockdep. */
static const struct seq_operations lockdep_ops = {
	.start	= l_start,
	.next	= l_next,
	.stop	= l_stop,
	.show	= l_show,
};
129 static int lockdep_open(struct inode *inode, struct file *file)
131 int res = seq_open(file, &lockdep_ops);
132 if (!res) {
133 struct seq_file *m = file->private_data;
135 if (!list_empty(&all_lock_classes))
136 m->private = list_entry(all_lock_classes.next,
137 struct lock_class, lock_entry);
138 else
139 m->private = NULL;
141 return res;
/* File operations for /proc/lockdep (read-only seq_file). */
static const struct file_operations proc_lockdep_operations = {
	.open		= lockdep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
151 #ifdef CONFIG_PROVE_LOCKING
152 static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
154 struct lock_chain *chain;
156 (*pos)++;
158 if (v == SEQ_START_TOKEN)
159 chain = m->private;
160 else {
161 chain = v;
163 if (*pos < nr_lock_chains)
164 chain = lock_chains + *pos;
165 else
166 chain = NULL;
169 return chain;
172 static void *lc_start(struct seq_file *m, loff_t *pos)
174 if (*pos == 0)
175 return SEQ_START_TOKEN;
177 if (*pos < nr_lock_chains)
178 return lock_chains + *pos;
180 return NULL;
183 static void lc_stop(struct seq_file *m, void *v)
187 static int lc_show(struct seq_file *m, void *v)
189 struct lock_chain *chain = v;
190 struct lock_class *class;
191 int i;
193 if (v == SEQ_START_TOKEN) {
194 seq_printf(m, "all lock chains:\n");
195 return 0;
198 seq_printf(m, "irq_context: %d\n", chain->irq_context);
200 for (i = 0; i < chain->depth; i++) {
201 class = lock_chain_get_class(chain, i);
202 if (!class->key)
203 continue;
205 seq_printf(m, "[%p] ", class->key);
206 print_name(m, class);
207 seq_puts(m, "\n");
209 seq_puts(m, "\n");
211 return 0;
/* seq_file iterator callbacks for /proc/lockdep_chains. */
static const struct seq_operations lockdep_chains_ops = {
	.start	= lc_start,
	.next	= lc_next,
	.stop	= lc_stop,
	.show	= lc_show,
};
221 static int lockdep_chains_open(struct inode *inode, struct file *file)
223 int res = seq_open(file, &lockdep_chains_ops);
224 if (!res) {
225 struct seq_file *m = file->private_data;
227 if (nr_lock_chains)
228 m->private = lock_chains;
229 else
230 m->private = NULL;
232 return res;
/* File operations for /proc/lockdep_chains (read-only seq_file). */
static const struct file_operations proc_lockdep_chains_operations = {
	.open		= lockdep_chains_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
241 #endif /* CONFIG_PROVE_LOCKING */
/*
 * Append the CONFIG_DEBUG_LOCKDEP-only event counters to the
 * /proc/lockdep_stats output; compiles to a no-op otherwise.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	/* Snapshot all irq on/off event counters up front. */
	unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
		     hi2 = debug_atomic_read(&hardirqs_off_events),
		     hr1 = debug_atomic_read(&redundant_hardirqs_on),
		     hr2 = debug_atomic_read(&redundant_hardirqs_off),
		     si1 = debug_atomic_read(&softirqs_on_events),
		     si2 = debug_atomic_read(&softirqs_off_events),
		     sr1 = debug_atomic_read(&redundant_softirqs_on),
		     sr2 = debug_atomic_read(&redundant_softirqs_off);

	seq_printf(m, " chain lookup misses: %11u\n",
		debug_atomic_read(&chain_lookup_misses));
	seq_printf(m, " chain lookup hits: %11u\n",
		debug_atomic_read(&chain_lookup_hits));
	seq_printf(m, " cyclic checks: %11u\n",
		debug_atomic_read(&nr_cyclic_checks));
	seq_printf(m, " cyclic-check recursions: %11u\n",
		debug_atomic_read(&nr_cyclic_check_recursions));
	seq_printf(m, " find-mask forwards checks: %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask forwards recursions: %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_recursions));
	seq_printf(m, " find-mask backwards checks: %11u\n",
		debug_atomic_read(&nr_find_usage_backwards_checks));
	seq_printf(m, " find-mask backwards recursions:%11u\n",
		debug_atomic_read(&nr_find_usage_backwards_recursions));

	seq_printf(m, " hardirq on events: %11u\n", hi1);
	seq_printf(m, " hardirq off events: %11u\n", hi2);
	seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
	seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
	seq_printf(m, " softirq on events: %11u\n", si1);
	seq_printf(m, " softirq off events: %11u\n", si2);
	seq_printf(m, " redundant softirq ons: %11u\n", sr1);
	seq_printf(m, " redundant softirq offs: %11u\n", sr2);
#endif
}
/*
 * single_open() show callback for /proc/lockdep_stats: walks
 * all_lock_classes once, classifying every class by its usage mask,
 * then prints the aggregate counters.  The exact line order and
 * wording of the output is user-visible ABI for this proc file.
 */
static int lockdep_stats_show(struct seq_file *m, void *v)
{
	struct lock_class *class;
	unsigned long nr_unused = 0, nr_uncategorized = 0,
		     nr_irq_safe = 0, nr_irq_unsafe = 0,
		     nr_softirq_safe = 0, nr_softirq_unsafe = 0,
		     nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
		     nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
		     nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
		     nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
		     sum_forward_deps = 0, factor = 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {

		/* A class may fall in several categories; count them all. */
		if (class->usage_mask == 0)
			nr_unused++;
		if (class->usage_mask == LOCKF_USED)
			nr_uncategorized++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ)
			nr_irq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQS)
			nr_irq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
			nr_softirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
			nr_softirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
			nr_hardirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
			nr_hardirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
			nr_irq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
			nr_irq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
			nr_softirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
			nr_softirq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
			nr_hardirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
			nr_hardirq_read_unsafe++;

#ifdef CONFIG_PROVE_LOCKING
		sum_forward_deps += lockdep_count_forward_deps(class);
#endif
	}
#ifdef CONFIG_DEBUG_LOCKDEP
	/* The runtime-maintained unused counter must match our recount. */
	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif
	seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
			nr_lock_classes, MAX_LOCKDEP_KEYS);
	seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
			nr_list_entries, MAX_LOCKDEP_ENTRIES);
	seq_printf(m, " indirect dependencies: %11lu\n",
			sum_forward_deps);

	/*
	 * Total number of dependencies:
	 *
	 * All irq-safe locks may nest inside irq-unsafe locks,
	 * plus all the other known dependencies:
	 */
	seq_printf(m, " all direct dependencies: %11lu\n",
			nr_irq_unsafe * nr_irq_safe +
			nr_hardirq_unsafe * nr_hardirq_safe +
			nr_list_entries);

	/*
	 * Estimated factor between direct and indirect
	 * dependencies:
	 *
	 * NOTE(review): 'factor' is computed but never printed here.
	 */
	if (nr_list_entries)
		factor = sum_forward_deps / nr_list_entries;

#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
			nr_lock_chains, MAX_LOCKDEP_CHAINS);
	seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
			nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	seq_printf(m, " in-hardirq chains: %11u\n",
			nr_hardirq_chains);
	seq_printf(m, " in-softirq chains: %11u\n",
			nr_softirq_chains);
#endif
	seq_printf(m, " in-process chains: %11u\n",
			nr_process_chains);
	seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
	seq_printf(m, " combined max dependencies: %11u\n",
			(nr_hardirq_chains + 1) *
			(nr_softirq_chains + 1) *
			(nr_process_chains + 1)
	);
	seq_printf(m, " hardirq-safe locks: %11lu\n",
			nr_hardirq_safe);
	seq_printf(m, " hardirq-unsafe locks: %11lu\n",
			nr_hardirq_unsafe);
	seq_printf(m, " softirq-safe locks: %11lu\n",
			nr_softirq_safe);
	seq_printf(m, " softirq-unsafe locks: %11lu\n",
			nr_softirq_unsafe);
	seq_printf(m, " irq-safe locks: %11lu\n",
			nr_irq_safe);
	seq_printf(m, " irq-unsafe locks: %11lu\n",
			nr_irq_unsafe);

	seq_printf(m, " hardirq-read-safe locks: %11lu\n",
			nr_hardirq_read_safe);
	seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
			nr_hardirq_read_unsafe);
	seq_printf(m, " softirq-read-safe locks: %11lu\n",
			nr_softirq_read_safe);
	seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
			nr_softirq_read_unsafe);
	seq_printf(m, " irq-read-safe locks: %11lu\n",
			nr_irq_read_safe);
	seq_printf(m, " irq-read-unsafe locks: %11lu\n",
			nr_irq_read_unsafe);

	seq_printf(m, " uncategorized locks: %11lu\n",
			nr_uncategorized);
	seq_printf(m, " unused locks: %11lu\n",
			nr_unused);
	seq_printf(m, " max locking depth: %11u\n",
			max_lockdep_depth);
	seq_printf(m, " max recursion depth: %11u\n",
			max_recursion_depth);
	lockdep_stats_debug_show(m);
	seq_printf(m, " debug_locks: %11u\n",
			debug_locks);

	return 0;
}
/* /proc/lockdep_stats is a one-shot dump; no iterator state needed. */
static int lockdep_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, lockdep_stats_show, NULL);
}
/* File operations for /proc/lockdep_stats (single_open, so single_release). */
static const struct file_operations proc_lockdep_stats_operations = {
	.open		= lockdep_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
433 #ifdef CONFIG_LOCK_STAT
/* One class' statistics, snapshotted via lock_stats() at open() time. */
struct lock_stat_data {
	struct lock_class *class;
	struct lock_class_stats stats;
};

/*
 * Per-open iterator state for /proc/lock_stat: a sorted snapshot of all
 * classes.  iter/iter_end delimit the populated part of stats[]; the
 * whole structure is vmalloc'ed because stats[] is sized for
 * MAX_LOCKDEP_KEYS entries.
 */
struct lock_stat_seq {
	struct lock_stat_data *iter;
	struct lock_stat_data *iter_end;
	struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
447 * sort on absolute number of contentions
449 static int lock_stat_cmp(const void *l, const void *r)
451 const struct lock_stat_data *dl = l, *dr = r;
452 unsigned long nl, nr;
454 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
455 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
457 return nr - nl;
/* Emit 'offset' spaces, then 'length' copies of 'c', then a newline. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int n;

	for (n = offset; n > 0; n--)
		seq_puts(m, " ");
	for (n = length; n > 0; n--)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}
/*
 * Format a lock_time value scaled down by 1000 as "<quot>.<frac>" with
 * two fractional digits (rounded).
 *
 * NOTE(review): do_div() expects an unsigned dividend, so a negative
 * 'nr' is not divided correctly -- already flagged by the XXX below.
 */
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
	unsigned long rem;

	nr += 5; /* for display rounding */
	rem = do_div(nr, 1000); /* XXX: do_div_signed */
	snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10);
}
480 static void seq_time(struct seq_file *m, s64 time)
482 char num[15];
484 snprint_time(num, sizeof(num), time);
485 seq_printf(m, " %14s", num);
/* Print one lock_time record as four columns: count, min, max, total. */
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
	seq_printf(m, "%14lu", lt->nr);
	seq_time(m, lt->min);
	seq_time(m, lt->max);
	seq_time(m, lt->total);
}
/*
 * Print the /proc/lock_stat rows for one snapshotted class: a -W line
 * when it was ever write-held, a -R line when ever read-held, then the
 * recorded contention points.  Column widths must stay in sync with
 * seq_header().
 */
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
	char name[39];
	struct lock_class *class;
	struct lock_class_stats *stats;
	int i, namelen;

	class = data->class;
	stats = &data->stats;

	/* Reserve room in the 38-char field for "#v" / "/s" suffixes. */
	namelen = 38;
	if (class->name_version > 1)
		namelen -= 2; /* XXX truncates versions > 9 */
	if (class->subclass)
		namelen -= 2;

	if (!class->name) {
		char str[KSYM_NAME_LEN];
		const char *key_name;

		/* Anonymous class: use the symbol of its key. */
		key_name = __get_key_name(class->key, str);
		snprintf(name, namelen, "%s", key_name);
	} else {
		snprintf(name, namelen, "%s", class->name);
	}
	namelen = strlen(name);
	if (class->name_version > 1) {
		snprintf(name+namelen, 3, "#%d", class->name_version);
		namelen += 2;
	}
	if (class->subclass) {
		snprintf(name+namelen, 3, "/%d", class->subclass);
		namelen += 2;
	}

	if (stats->write_holdtime.nr) {
		/* "-W" suffix only when a separate "-R" row follows. */
		if (stats->read_holdtime.nr)
			seq_printf(m, "%38s-W:", name);
		else
			seq_printf(m, "%40s:", name);

		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
		seq_lock_time(m, &stats->write_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
		seq_lock_time(m, &stats->write_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_holdtime.nr) {
		seq_printf(m, "%38s-R:", name);
		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
		seq_lock_time(m, &stats->read_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
		seq_lock_time(m, &stats->read_holdtime);
		seq_puts(m, "\n");
	}

	/* No contention at all: skip the contention-point breakdown. */
	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
		return;

	/* Account for the "-R"/"-W" suffix in the underline width. */
	if (stats->read_holdtime.nr)
		namelen += 2;

	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
		char sym[KSYM_SYMBOL_LEN];
		char ip[32];

		if (class->contention_point[i] == 0)
			break;

		/* Underline the class name before the first point. */
		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		sprint_symbol(sym, class->contention_point[i]);
		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contention_point[i]);
		seq_printf(m, "%40s %14lu %29s %s\n", name,
				stats->contention_point[i],
				ip, sym);
	}
	if (i) {
		seq_puts(m, "\n");
		seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
		seq_puts(m, "\n");
	}
}
/*
 * Print the /proc/lock_stat banner: version line plus the column
 * headings.  Widths (40-char name + ten 14-char columns) must match
 * seq_stats().
 */
static void seq_header(struct seq_file *m)
{
	seq_printf(m, "lock_stat version 0.2\n");
	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
			"%14s %14s\n",
			"class name",
			"con-bounces",
			"contentions",
			"waittime-min",
			"waittime-max",
			"waittime-total",
			"acq-bounces",
			"acquisitions",
			"holdtime-min",
			"holdtime-max",
			"holdtime-total");
	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
	seq_printf(m, "\n");
}
604 static void *ls_start(struct seq_file *m, loff_t *pos)
606 struct lock_stat_seq *data = m->private;
608 if (*pos == 0)
609 return SEQ_START_TOKEN;
611 data->iter = data->stats + *pos;
612 if (data->iter >= data->iter_end)
613 data->iter = NULL;
615 return data->iter;
618 static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
620 struct lock_stat_seq *data = m->private;
622 (*pos)++;
624 if (v == SEQ_START_TOKEN)
625 data->iter = data->stats;
626 else {
627 data->iter = v;
628 data->iter++;
631 if (data->iter == data->iter_end)
632 data->iter = NULL;
634 return data->iter;
637 static void ls_stop(struct seq_file *m, void *v)
641 static int ls_show(struct seq_file *m, void *v)
643 if (v == SEQ_START_TOKEN)
644 seq_header(m);
645 else
646 seq_stats(m, v);
648 return 0;
651 static struct seq_operations lockstat_ops = {
652 .start = ls_start,
653 .next = ls_next,
654 .stop = ls_stop,
655 .show = ls_show,
658 static int lock_stat_open(struct inode *inode, struct file *file)
660 int res;
661 struct lock_class *class;
662 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
664 if (!data)
665 return -ENOMEM;
667 res = seq_open(file, &lockstat_ops);
668 if (!res) {
669 struct lock_stat_data *iter = data->stats;
670 struct seq_file *m = file->private_data;
672 data->iter = iter;
673 list_for_each_entry(class, &all_lock_classes, lock_entry) {
674 iter->class = class;
675 iter->stats = lock_stats(class);
676 iter++;
678 data->iter_end = iter;
680 sort(data->stats, data->iter_end - data->iter,
681 sizeof(struct lock_stat_data),
682 lock_stat_cmp, NULL);
684 m->private = data;
685 } else
686 vfree(data);
688 return res;
691 static ssize_t lock_stat_write(struct file *file, const char __user *buf,
692 size_t count, loff_t *ppos)
694 struct lock_class *class;
695 char c;
697 if (count) {
698 if (get_user(c, buf))
699 return -EFAULT;
701 if (c != '0')
702 return count;
704 list_for_each_entry(class, &all_lock_classes, lock_entry)
705 clear_lock_stats(class);
707 return count;
710 static int lock_stat_release(struct inode *inode, struct file *file)
712 struct seq_file *seq = file->private_data;
714 vfree(seq->private);
715 seq->private = NULL;
716 return seq_release(inode, file);
/* File operations for /proc/lock_stat (readable stats, writable reset). */
static const struct file_operations proc_lock_stat_operations = {
	.open		= lock_stat_open,
	.write		= lock_stat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= lock_stat_release,
};
726 #endif /* CONFIG_LOCK_STAT */
/*
 * Create the lockdep /proc entries at boot.  Entries are root-readable
 * only (S_IRUSR); the config-dependent files are created only when the
 * corresponding option is enabled.  proc_create() failures are ignored,
 * matching the best-effort style of other proc registrations.
 */
static int __init lockdep_proc_init(void)
{
	proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
#ifdef CONFIG_PROVE_LOCKING
	proc_create("lockdep_chains", S_IRUSR, NULL,
		    &proc_lockdep_chains_operations);
#endif
	proc_create("lockdep_stats", S_IRUSR, NULL,
		    &proc_lockdep_stats_operations);

#ifdef CONFIG_LOCK_STAT
	proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
#endif

	return 0;
}

__initcall(lockdep_proc_init);