ftrace: printk and trace irqsoff and wakeups
[linux-2.6/mini2440.git] / kernel/trace/trace_sched_wakeup.c
blob 5948011006bca9f4f05d97dcfc4af79de89f778a
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
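/*
 * Overview (descriptive note, not from the original file): the "wakeup"
 * tracer measures the latency between the wakeup of the highest-priority
 * real-time task seen so far and the context switch that actually runs
 * it, keeping a trace of the worst case observed.
 */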
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

static DEFINE_SPINLOCK(wakeup_lock);

static void __wakeup_reset(struct trace_array *tr);
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
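/*
 * Called on a context switch.  If the task being switched in is the one
 * we are waiting for, compute the delta between the wakeup timestamp
 * (preempt_timestamp) and now.  report_latency() above decides whether to
 * keep it: with tracing_thresh set, anything at or above the threshold is
 * recorded; otherwise only a new maximum (above tracing_max_latency),
 * which is then saved via update_max_tr().
 */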
void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;
	/* The task we are waiting for is waking up */
	data = tr->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(tr, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	assert_spin_locked(&wakeup_lock);

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(data);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_lock, flags);
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
}
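/*
 * Possibly begin a new trace: only a real-time wakee whose priority beats
 * both the task already being traced (wakeup_prio) and the currently
 * running task is worth tracking (a lower ->prio value means a higher
 * priority).  The wakeup timestamp is stored in preempt_timestamp for the
 * wakee's CPU and later read by wakeup_sched_switch().
 */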
static void
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
		   struct task_struct *curr)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= curr->prio)
		return;

	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(tr);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, tr->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	spin_unlock(&wakeup_lock);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
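/*
 * Hook invoked from the scheduler's wakeup path (try_to_wake_up, per the
 * comment above): record the cmdlines of waker and wakee, then let
 * wakeup_check_start() decide whether this wakeup starts a new trace.
 */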
void wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
{
	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(curr);
	tracing_record_cmdline(wakee);

	wakeup_check_start(wakeup_trace, wakee, curr);
}
static void start_wakeup_tracer(struct trace_array *tr)
{
	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	tracer_enabled = 1;

	return;
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
}
static void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);
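/*
 * Usage sketch (an assumption about the ftrace debugfs interface of this
 * kernel, not part of this file; the mount point of debugfs may differ):
 *
 *	echo wakeup > /debug/tracing/current_tracer
 *	echo 1 > /debug/tracing/tracing_enabled
 *	cat /debug/tracing/tracing_max_latency
 */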