/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
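/*
 * Both entries are created below with mode S_IRUSR, so they are normally
 * read as root, e.g. "cat /proc/lockdep" or "cat /proc/lockdep_stats".
 */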
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

#include "lockdep_internals.h"
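/*
 * /proc/lockdep is a seq_file: l_start()/l_next()/l_stop() iterate over the
 * global all_lock_classes list, and l_show() prints one lock class per entry.
 */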
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class = v;

        (*pos)++;

        if (class->lock_entry.next != &all_lock_classes)
                class = list_entry(class->lock_entry.next, struct lock_class,
                                   lock_entry);
        else
                class = NULL;
        m->private = class;

        return class;
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
        struct lock_class *class = m->private;

        if (&class->lock_entry == all_lock_classes.next)
                seq_printf(m, "all lock classes:\n");

        return class;
}
static void l_stop(struct seq_file *m, void *v)
{
}
static unsigned long count_forward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_after, entry)
                ret += count_forward_deps(entry->class);

        return ret;
}
static unsigned long count_backward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_before, entry)
                ret += count_backward_deps(entry->class);

        return ret;
}
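/*
 * print_name(): classes registered without an explicit name fall back to the
 * symbol name of their key via __get_key_name(); name_version and subclass
 * are appended as "#n" and "/n" to disambiguate reused names.
 */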
static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[128];
        const char *name = class->name;

        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}
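/*
 * One line per class: key pointer, optional OPS counter, forward/backward
 * dependency counts, the four usage characters, the class name, then every
 * immediate (distance == 1) forward dependency on its own "->" line.
 */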
static int l_show(struct seq_file *m, void *v)
{
        unsigned long nr_forward_deps, nr_backward_deps;
        struct lock_class *class = m->private;
        struct lock_list *entry;
        char c1, c2, c3, c4;

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
        nr_forward_deps = count_forward_deps(class);
        seq_printf(m, " FD:%5ld", nr_forward_deps);

        nr_backward_deps = count_backward_deps(class);
        seq_printf(m, " BD:%5ld", nr_backward_deps);

        get_usage_chars(class, &c1, &c2, &c3, &c4);
        seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);

        seq_printf(m, ": ");
        print_name(m, class);
        seq_printf(m, "\n");

        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class);
                        print_name(m, entry->class);
                        seq_printf(m, "\n");
                }
        }
        seq_printf(m, "\n");

        return 0;
}
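/*
 * lockdep_open() seeds m->private with the first class on all_lock_classes
 * (or NULL when the list is empty); l_next() keeps it pointing at the
 * current position between reads.
 */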
static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

static int lockdep_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_ops);
        if (!res) {
                struct seq_file *m = file->private_data;

                if (!list_empty(&all_lock_classes))
                        m->private = list_entry(all_lock_classes.next,
                                        struct lock_class, lock_entry);
                else
                        m->private = NULL;
        }
        return res;
}
static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
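/*
 * The event counters below are only maintained when CONFIG_DEBUG_LOCKDEP is
 * set; without it this function compiles to an empty body.
 */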
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
                     hi2 = debug_atomic_read(&hardirqs_off_events),
                     hr1 = debug_atomic_read(&redundant_hardirqs_on),
                     hr2 = debug_atomic_read(&redundant_hardirqs_off),
                     si1 = debug_atomic_read(&softirqs_on_events),
                     si2 = debug_atomic_read(&softirqs_off_events),
                     sr1 = debug_atomic_read(&redundant_softirqs_on),
                     sr2 = debug_atomic_read(&redundant_softirqs_off);

        seq_printf(m, " chain lookup misses: %11u\n",
                debug_atomic_read(&chain_lookup_misses));
        seq_printf(m, " chain lookup hits: %11u\n",
                debug_atomic_read(&chain_lookup_hits));
        seq_printf(m, " cyclic checks: %11u\n",
                debug_atomic_read(&nr_cyclic_checks));
        seq_printf(m, " cyclic-check recursions: %11u\n",
                debug_atomic_read(&nr_cyclic_check_recursions));
        seq_printf(m, " find-mask forwards checks: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask forwards recursions: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_recursions));
        seq_printf(m, " find-mask backwards checks: %11u\n",
                debug_atomic_read(&nr_find_usage_backwards_checks));
        seq_printf(m, " find-mask backwards recursions:%11u\n",
                debug_atomic_read(&nr_find_usage_backwards_recursions));

        seq_printf(m, " hardirq on events: %11u\n", hi1);
        seq_printf(m, " hardirq off events: %11u\n", hi2);
        seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
        seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
        seq_printf(m, " softirq on events: %11u\n", si1);
        seq_printf(m, " softirq off events: %11u\n", si2);
        seq_printf(m, " redundant softirq ons: %11u\n", sr1);
        seq_printf(m, " redundant softirq offs: %11u\n", sr2);
#endif
}
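/*
 * /proc/lockdep_stats: walk all_lock_classes once, classify every class by
 * its usage_mask bits and sum its forward dependencies, then print the
 * totals together with table usage against the compile-time maximums.
 */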
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0, factor = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        nr_hardirq_read_unsafe++;

                sum_forward_deps += count_forward_deps(class);
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif
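        /*
         * Print the per-category totals and how much of each fixed-size
         * lockdep table is currently in use:
         */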
        seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
                        nr_list_entries, MAX_LOCKDEP_ENTRIES);
        seq_printf(m, " indirect dependencies: %11lu\n",
                        sum_forward_deps);

        /*
         * Total number of dependencies:
         *
         * All irq-safe locks may nest inside irq-unsafe locks,
         * plus all the other known dependencies:
         */
        seq_printf(m, " all direct dependencies: %11lu\n",
                        nr_irq_unsafe * nr_irq_safe +
                        nr_hardirq_unsafe * nr_hardirq_safe +
                        nr_list_entries);

        /*
         * Estimated factor between direct and indirect
         * dependencies:
         */
        if (nr_list_entries)
                factor = sum_forward_deps / nr_list_entries;

        seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
                        nr_lock_chains, MAX_LOCKDEP_CHAINS);

#ifdef CONFIG_TRACE_IRQFLAGS
        seq_printf(m, " in-hardirq chains: %11u\n",
                        nr_hardirq_chains);
        seq_printf(m, " in-softirq chains: %11u\n",
                        nr_softirq_chains);
#endif
        seq_printf(m, " in-process chains: %11u\n",
                        nr_process_chains);
        seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
        seq_printf(m, " combined max dependencies: %11u\n",
                        (nr_hardirq_chains + 1) *
                        (nr_softirq_chains + 1) *
                        (nr_process_chains + 1));
        seq_printf(m, " hardirq-safe locks: %11lu\n",
                        nr_hardirq_safe);
        seq_printf(m, " hardirq-unsafe locks: %11lu\n",
                        nr_hardirq_unsafe);
        seq_printf(m, " softirq-safe locks: %11lu\n",
                        nr_softirq_safe);
        seq_printf(m, " softirq-unsafe locks: %11lu\n",
                        nr_softirq_unsafe);
        seq_printf(m, " irq-safe locks: %11lu\n",
                        nr_irq_safe);
        seq_printf(m, " irq-unsafe locks: %11lu\n",
                        nr_irq_unsafe);

        seq_printf(m, " hardirq-read-safe locks: %11lu\n",
                        nr_hardirq_read_safe);
        seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
                        nr_hardirq_read_unsafe);
        seq_printf(m, " softirq-read-safe locks: %11lu\n",
                        nr_softirq_read_safe);
        seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
                        nr_softirq_read_unsafe);
        seq_printf(m, " irq-read-safe locks: %11lu\n",
                        nr_irq_read_safe);
        seq_printf(m, " irq-read-unsafe locks: %11lu\n",
                        nr_irq_read_unsafe);

        seq_printf(m, " uncategorized locks: %11lu\n",
                        nr_uncategorized);
        seq_printf(m, " unused locks: %11lu\n",
                        nr_unused);
        seq_printf(m, " max locking depth: %11u\n",
                        max_lockdep_depth);
        seq_printf(m, " max recursion depth: %11u\n",
                        max_recursion_depth);
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks: %11u\n",
                        debug_locks);

        return 0;
}
static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}
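/*
 * single_open() allocates a seq_operations structure behind the scenes;
 * pairing it with single_release() (rather than plain seq_release()) makes
 * sure that allocation is freed again on close.
 */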
static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static int __init lockdep_proc_init(void)
{
        struct proc_dir_entry *entry;

        entry = create_proc_entry("lockdep", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_operations;

        entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_stats_operations;

        return 0;
}
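/*
 * Plain initcall: the two proc entries appear once initcalls run during
 * boot. There is no module exit path; lockdep is built into the kernel.
 */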
__initcall(lockdep_proc_init);