/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;

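/*
 * Probe fired on every context switch. The per-cpu "disabled"
 * counter is a reentrancy guard: the event is written only when
 * atomic_inc_return() reports us as the sole user of this cpu's
 * buffer, so a recursive trace call cannot corrupt it.
 */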
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(prev);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                tracing_sched_switch_trace(tr, data, prev, next, flags);
                if (trace_flags & TRACE_ITER_SCHED_TREE)
                        ftrace_all_fair_tasks(__rq, tr, data);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

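/*
 * Probe fired when a task is woken: same buffer discipline as
 * ctx_switch_func(), but it records a wakeup event for the wakee
 * against the currently running task.
 */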
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
                if (trace_flags & TRACE_ITER_SCHED_TREE)
                        ftrace_all_fair_tasks(__rq, tr, data);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

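/*
 * Entry point called from the scheduler's context-switch path; it
 * feeds both this tracer and, via the chain call, the wakeup tracer.
 */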
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
                  struct task_struct *next)
{
        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        ctx_switch_func(__rq, prev, next);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_switch(prev, next);
}

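/*
 * Entry point called from the scheduler's wakeup path; mirrors
 * ftrace_ctx_switch() for task wakeups.
 */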
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
                    struct task_struct *curr)
{
        wakeup_func(__rq, wakee, curr);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_wakeup(wakee, curr);
}

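/*
 * Record three arbitrary values as a "special" trace entry; handy
 * for ad-hoc instrumentation from anywhere in the kernel.
 */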
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

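/*
 * Restart the trace clock and clear every online cpu's buffer so a
 * new trace starts from a clean state.
 */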
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

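/*
 * Register with the tracing core under the name "sched_switch";
 * ctrl_update runs when tracing is toggled through the debugfs
 * control file.
 */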
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);