/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

#include "lockdep_internals.h"

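/*
 * The l_start/l_next/l_stop/l_show callbacks below implement a seq_file
 * iterator over the global all_lock_classes list (maintained by lockdep.c);
 * each l_show() call emits one lock-class record of /proc/lockdep.
 */
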
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class = v;

        (*pos)++;

        /* advance the cursor; NULL once we run off the end of the list */
        if (class->lock_entry.next != &all_lock_classes)
                class = list_entry(class->lock_entry.next, struct lock_class,
                                   lock_entry);
        else
                class = NULL;
        m->private = class;

        return class;
}

static void *l_start(struct seq_file *m, loff_t *pos)
{
        struct lock_class *class = m->private;

        /* print the header once, when starting from the head of the list */
        if (&class->lock_entry == all_lock_classes.next)
                seq_printf(m, "all lock classes:\n");

        return class;
}

static void l_stop(struct seq_file *m, void *v)
{
}

static unsigned long count_forward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_after, entry)
                ret += count_forward_deps(entry->class);

        return ret;
}

static unsigned long count_backward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_before, entry)
                ret += count_backward_deps(entry->class);

        return ret;
}

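/*
 * Note: both helpers start at 1 (the class itself) and recurse without
 * deduplication, so a dependency reachable via several paths is counted
 * once per path; the FD:/BD: numbers in /proc/lockdep and the indirect
 * dependency total in /proc/lockdep_stats are estimates, not exact counts.
 */
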
static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[KSYM_NAME_LEN + 1];
        const char *name = class->name;

        if (!name) {
                /* unnamed class: resolve a name from the key's symbol */
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}

static int l_show(struct seq_file *m, void *v)
{
        unsigned long nr_forward_deps, nr_backward_deps;
        struct lock_class *class = m->private;
        struct lock_list *entry;
        char c1, c2, c3, c4;

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
        nr_forward_deps = count_forward_deps(class);
        seq_printf(m, " FD:%5ld", nr_forward_deps);

        nr_backward_deps = count_backward_deps(class);
        seq_printf(m, " BD:%5ld", nr_backward_deps);

        get_usage_chars(class, &c1, &c2, &c3, &c4);
        seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);

        seq_printf(m, ": ");
        print_name(m, class);
        seq_printf(m, "\n");

        /* list the direct (distance 1) forward dependencies of this class */
        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class);
                        print_name(m, entry->class);
                        seq_printf(m, "\n");
                }
        }
        seq_printf(m, "\n");

        return 0;
}

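/*
 * Each /proc/lockdep record produced above looks roughly like this
 * (illustrative values, not real output):
 *
 *      c06a1338 OPS:    2306 FD:    5 BD:    1 ....: &dentry->d_lock
 *       -> [c06a1368] dcache_lock
 *
 * i.e. key pointer, operation count, forward/backward dependency counts,
 * usage characters, class name, then its distance-1 forward dependencies.
 */
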
static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

static int lockdep_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_ops);

        if (!res) {
                struct seq_file *m = file->private_data;

                if (!list_empty(&all_lock_classes))
                        m->private = list_entry(all_lock_classes.next,
                                        struct lock_class, lock_entry);
                else
                        m->private = NULL;
        }
        return res;
}

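/*
 * m->private is used as the iteration cursor: it is primed here with the
 * first lock class (or NULL if none have been registered yet) and advanced
 * by l_next() on every step.
 */
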
static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
                     hi2 = debug_atomic_read(&hardirqs_off_events),
                     hr1 = debug_atomic_read(&redundant_hardirqs_on),
                     hr2 = debug_atomic_read(&redundant_hardirqs_off),
                     si1 = debug_atomic_read(&softirqs_on_events),
                     si2 = debug_atomic_read(&softirqs_off_events),
                     sr1 = debug_atomic_read(&redundant_softirqs_on),
                     sr2 = debug_atomic_read(&redundant_softirqs_off);

        seq_printf(m, " chain lookup misses: %11u\n",
                debug_atomic_read(&chain_lookup_misses));
        seq_printf(m, " chain lookup hits: %11u\n",
                debug_atomic_read(&chain_lookup_hits));
        seq_printf(m, " cyclic checks: %11u\n",
                debug_atomic_read(&nr_cyclic_checks));
        seq_printf(m, " cyclic-check recursions: %11u\n",
                debug_atomic_read(&nr_cyclic_check_recursions));
        seq_printf(m, " find-mask forwards checks: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask forwards recursions: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_recursions));
        seq_printf(m, " find-mask backwards checks: %11u\n",
                debug_atomic_read(&nr_find_usage_backwards_checks));
        seq_printf(m, " find-mask backwards recursions:%11u\n",
                debug_atomic_read(&nr_find_usage_backwards_recursions));

        seq_printf(m, " hardirq on events: %11u\n", hi1);
        seq_printf(m, " hardirq off events: %11u\n", hi2);
        seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
        seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
        seq_printf(m, " softirq on events: %11u\n", si1);
        seq_printf(m, " softirq off events: %11u\n", si2);
        seq_printf(m, " redundant softirq ons: %11u\n", sr1);
        seq_printf(m, " redundant softirq offs: %11u\n", sr2);
#endif
}

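/*
 * The hi/hr/si/sr values pair each real hardirq/softirq state-change event
 * with the redundant ones (calls lockdep saw that did not actually change
 * the tracked irq state); all of them are lockdep's debug_atomic event
 * counters.
 */
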
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0, factor = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        nr_hardirq_read_unsafe++;

                sum_forward_deps += count_forward_deps(class);
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif

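        /*
         * Classification above: LOCKF_USED_IN_* bits mean the lock has been
         * taken in that context (so it is "-safe" for it), while
         * LOCKF_ENABLED_* bits mean it has been taken with that context
         * enabled (so it is "-unsafe").  The _READ variants track read-lock
         * usage separately.
         */
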
        seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
                        nr_list_entries, MAX_LOCKDEP_ENTRIES);
        seq_printf(m, " indirect dependencies: %11lu\n",
                        sum_forward_deps);

        /*
         * Total number of dependencies:
         *
         * All irq-safe locks may nest inside irq-unsafe locks,
         * plus all the other known dependencies:
         */
        seq_printf(m, " all direct dependencies: %11lu\n",
                        nr_irq_unsafe * nr_irq_safe +
                        nr_hardirq_unsafe * nr_hardirq_safe +
                        nr_list_entries);

        /*
         * Estimated factor between direct and indirect
         * dependencies:
         */
        if (nr_list_entries)
                factor = sum_forward_deps / nr_list_entries;

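        /*
         * (The division is guarded on nr_list_entries being non-zero so the
         * estimate is simply skipped before any dependency entries exist.)
         */
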
        seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
                        nr_lock_chains, MAX_LOCKDEP_CHAINS);

#ifdef CONFIG_TRACE_IRQFLAGS
        seq_printf(m, " in-hardirq chains: %11u\n",
                        nr_hardirq_chains);
        seq_printf(m, " in-softirq chains: %11u\n",
                        nr_softirq_chains);
#endif
        seq_printf(m, " in-process chains: %11u\n",
                        nr_process_chains);
        seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
        seq_printf(m, " combined max dependencies: %11u\n",
                        (nr_hardirq_chains + 1) *
                        (nr_softirq_chains + 1) *
                        (nr_process_chains + 1));
        seq_printf(m, " hardirq-safe locks: %11lu\n",
                        nr_hardirq_safe);
        seq_printf(m, " hardirq-unsafe locks: %11lu\n",
                        nr_hardirq_unsafe);
        seq_printf(m, " softirq-safe locks: %11lu\n",
                        nr_softirq_safe);
        seq_printf(m, " softirq-unsafe locks: %11lu\n",
                        nr_softirq_unsafe);
        seq_printf(m, " irq-safe locks: %11lu\n",
                        nr_irq_safe);
        seq_printf(m, " irq-unsafe locks: %11lu\n",
                        nr_irq_unsafe);

        seq_printf(m, " hardirq-read-safe locks: %11lu\n",
                        nr_hardirq_read_safe);
        seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
                        nr_hardirq_read_unsafe);
        seq_printf(m, " softirq-read-safe locks: %11lu\n",
                        nr_softirq_read_safe);
        seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
                        nr_softirq_read_unsafe);
        seq_printf(m, " irq-read-safe locks: %11lu\n",
                        nr_irq_read_safe);
        seq_printf(m, " irq-read-unsafe locks: %11lu\n",
                        nr_irq_read_unsafe);

        seq_printf(m, " uncategorized locks: %11lu\n",
                        nr_uncategorized);
        seq_printf(m, " unused locks: %11lu\n",
                        nr_unused);
        seq_printf(m, " max locking depth: %11u\n",
                        max_lockdep_depth);
        seq_printf(m, " max recursion depth: %11u\n",
                        max_recursion_depth);
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks: %11u\n",
                        debug_locks);

        return 0;
}

static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}

static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init lockdep_proc_init(void)
{
        struct proc_dir_entry *entry;

        entry = create_proc_entry("lockdep", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_operations;

        entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_stats_operations;

        return 0;
}

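/*
 * Both /proc files are created root-readable only (S_IRUSR); the NULL checks
 * above cover the case where create_proc_entry() fails, in which case the
 * corresponding file is simply not registered.
 */
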
__initcall(lockdep_proc_init);