/*
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;
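
/*
 * Probe attached to the sched_switch tracepoint: record the command
 * lines of both tasks and, while the tracer is enabled, log the
 * context switch into the current CPU's trace buffer.
 */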
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}
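
/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints:
 * record the current (waking) task's command line and log the wakeup
 * of @wakee into the current CPU's trace buffer.
 */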
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (!likely(tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
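
/* Start a fresh trace: record the new start time and reset every online CPU's buffer. */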
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
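
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unwinding the earlier registrations if a
 * later one fails.
 */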
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
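
/*
 * sched_ref counts the users of the probes: the first user registers
 * them, the last one to leave unregisters them.
 */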
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
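
/*
 * Tracer start/stop: reset the buffers, toggle cmdline recording and
 * flip tracer_enabled so the probes start or stop logging.
 */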
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
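
/*
 * If cmdline recording was requested before this initcall ran,
 * register the probes now, then register the tracer itself.
 */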
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}

	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);