kernel/latencytop.c

/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

static DEFINE_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

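/**
 * clear_all_latency_tracing - reset the latency records of a task
 * @p: the task whose latency records are cleared
 *
 * Does nothing while latencytop is disabled.
 */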
void clear_all_latency_tracing(struct task_struct *p)
{
        unsigned long flags;

        if (!latencytop_enabled)
                return;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&p->latency_record, 0, sizeof(p->latency_record));
        p->latency_record_count = 0;
        spin_unlock_irqrestore(&latency_lock, flags);
}

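/* Reset the global accumulation table (used on writes to /proc/latency_stats). */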
static void clear_global_latency_tracing(void)
{
        unsigned long flags;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&latency_record, 0, sizeof(latency_record));
        spin_unlock_irqrestore(&latency_lock, flags);
}

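/*
 * Fold one latency record into the global table: if an entry with the
 * same backtrace already exists, bump its count/time/max; otherwise
 * store the record in the first free slot. Kernel threads (no mm) are
 * skipped. Called with latency_lock held.
 */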
static void __sched
account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
{
        int firstnonnull = MAXLR + 1;
        int i;

        if (!latencytop_enabled)
                return;

        /* skip kernel threads for now */
        if (!tsk->mm)
                return;

        for (i = 0; i < MAXLR; i++) {
                int q, same = 1;

                /* Nothing stored: */
                if (!latency_record[i].backtrace[0]) {
                        if (firstnonnull > i)
                                firstnonnull = i;
                        continue;
                }
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat->backtrace[q];

                        if (latency_record[i].backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        latency_record[i].count++;
                        latency_record[i].time += lat->time;
                        if (lat->time > latency_record[i].max)
                                latency_record[i].max = lat->time;
                        return;
                }
        }

        i = firstnonnull;
        if (i >= MAXLR - 1)
                return;

        /* Allocated a new one: */
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

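/* Capture the task's kernel stack backtrace into the latency record. */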
static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
{
        struct stack_trace trace;

        memset(&trace, 0, sizeof(trace));
        trace.max_entries = LT_BACKTRACEDEPTH;
        trace.entries = &lat->backtrace[0];
        trace.skip = 0;
        save_stack_trace_tsk(tsk, &trace);
}

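/**
 * account_scheduler_latency - record a latency hit, called by the scheduler
 * @tsk: the task that hit the latency
 * @usecs: duration of the latency in microseconds
 * @inter: nonzero if the sleep was interruptible
 *
 * Interruptible sleeps longer than 5 msec are ignored, since those are
 * typically user-requested waits (poll/select and friends).
 */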
void __sched
account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        if (!latencytop_enabled)
                return;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;
        store_stacktrace(tsk, &lat);

        spin_lock_irqsave(&latency_lock, flags);

        account_global_scheduler_latency(tsk, &lat);

        /*
         * short term hack; if we're > 32 we stop; future we recycle:
         */
        tsk->latency_record_count++;
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        for (i = 0; i < LT_SAVECOUNT; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        mylat->count++;
                        mylat->time += lat.time;
                        if (lat.time > mylat->max)
                                mylat->max = lat.time;
                        goto out_unlock;
                }
        }

        /* Allocated a new one: */
        i = tsk->latency_record_count;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
        spin_unlock_irqrestore(&latency_lock, flags);
}

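/*
 * Emit the global records via /proc/latency_stats: each line is
 * "count time max" followed by the symbolized backtrace, with the
 * "+offset/size" part of each symbol stripped.
 */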
static int lstats_show(struct seq_file *m, void *v)
{
        int i;

        seq_puts(m, "Latency Top version : v0.1\n");

        for (i = 0; i < MAXLR; i++) {
                if (latency_record[i].backtrace[0]) {
                        int q;
                        seq_printf(m, "%i %li %li ",
                                latency_record[i].count,
                                latency_record[i].time,
                                latency_record[i].max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                char sym[KSYM_NAME_LEN];
                                char *c;
                                if (!latency_record[i].backtrace[q])
                                        break;
                                if (latency_record[i].backtrace[q] == ULONG_MAX)
                                        break;
                                sprint_symbol(sym, latency_record[i].backtrace[q]);
                                c = strchr(sym, '+');
                                if (c)
                                        *c = 0;
                                seq_printf(m, "%s ", sym);
                        }
                        seq_printf(m, "\n");
                }
        }
        return 0;
}

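/* Any write to /proc/latency_stats resets the global records. */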
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
             loff_t *offs)
{
        clear_global_latency_tracing();

        return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, lstats_show, NULL);
}

static struct file_operations lstats_fops = {
        .open           = lstats_open,
        .read           = seq_read,
        .write          = lstats_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

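/* Register /proc/latency_stats at boot. */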
static int __init init_lstats_procfs(void)
{
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
}
__initcall(init_lstats_procfs);