kernel/trace/trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int			sched_stopped;

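/*
 * sched_ref counts users of the scheduler tracepoints (command line
 * recording and the sched_switch tracer itself); the probes are
 * registered on the 0->1 transition and unregistered on 1->0, under
 * sched_register_mutex. tracer_enabled gates whether events are
 * actually recorded, and sched_stopped mirrors the global tracing
 * on/off switch.
 */
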
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= prev->pid;
	entry->prev_prio	= prev->prio;
	entry->prev_state	= prev->state;
	entry->next_pid		= next->pid;
	entry->next_prio	= next->prio;
	entry->next_state	= next->state;
	entry->next_cpu		= task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}

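/*
 * The ring buffer protocol above: trace_buffer_lock_reserve() returns
 * space for one entry, the fields are filled in place, and
 * trace_buffer_unlock_commit() publishes the entry. If the event
 * filter rejects it, filter_check_discard() drops the reserved event
 * instead of committing it.
 */
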
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}

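/*
 * probe_sched_switch() is the callback attached to the sched_switch
 * tracepoint; probe_sched_wakeup() below follows the same pattern:
 * bail out cheaply when nobody is recording, then disable interrupts
 * so the per-cpu data stays stable while the event is written.
 */
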
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= curr->pid;
	entry->prev_prio	= curr->prio;
	entry->prev_state	= curr->state;
	entry->next_pid		= wakee->pid;
	entry->next_prio	= wakee->prio;
	entry->next_state	= wakee->state;
	entry->next_cpu		= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}

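/*
 * Unlike the context-switch path, the wakeup path also records a
 * kernel stack trace and a user stack trace after the commit, and it
 * does so whether or not the filter discarded the wakeup entry itself.
 */
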
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

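/*
 * Registration above uses the usual kernel unwind idiom: each probe
 * that was attached successfully has a matching fail_* label, so a
 * failure part-way through detaches exactly the probes already
 * registered, in reverse order.
 */
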
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

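/*
 * These two helpers implement refcounted start/stop: the first
 * starter registers the tracepoint probes, the last stopper removes
 * them, so multiple users can overlap safely.
 */
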
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

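/*
 * Hypothetical usage sketch (compiled out): start/stop calls must stay
 * balanced, since only the final stop unregisters the probes.
 */
#if 0
	tracing_start_cmdline_record();	/* first user: probes registered */
	tracing_start_cmdline_record();	/* second user: refcount bump only */
	tracing_stop_cmdline_record();	/* refcount drop only */
	tracing_stop_cmdline_record();	/* last user: probes unregistered */
#endif
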
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}

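/*
 * Illustrative sketch (hypothetical, compiled out): a tracer that
 * wants context switches folded into its own buffer would call the
 * helpers above from its init callback, much as
 * sched_switch_trace_init() below does directly. my_tracer_init is
 * not part of this file.
 */
#if 0
static int my_tracer_init(struct trace_array *tr)
{
	tracing_sched_switch_assign_trace(tr);	/* record into tr */
	tracing_start_sched_switch_record();	/* hook the tracepoints */
	return 0;
}
#endif
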
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}

static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}

static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
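
/*
 * Usage note: once registered, the tracer is selectable at runtime
 * through the tracing debugfs interface, e.g. (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 */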