/*
 * trace stack traces
 *
 * Copyright (C) 2004-2008, Soeren Sandmann
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include <asm/stacktrace.h>

#include "trace.h"
21 static struct trace_array
*sysprof_trace
;
22 static int __read_mostly tracer_enabled
;
/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

/* Serializes timer start/stop against sample_period updates. */
static DEFINE_MUTEX(sample_timer_lock);
32 * Per CPU hrtimers that do the profiling:
34 static DEFINE_PER_CPU(struct hrtimer
, stack_trace_hrtimer
);
37 const void __user
*next_fp
;
38 unsigned long return_address
;
41 static int copy_stack_frame(const void __user
*fp
, struct stack_frame
*frame
)
45 if (!access_ok(VERIFY_READ
, fp
, sizeof(*frame
)))
50 if (__copy_from_user_inatomic(frame
, fp
, sizeof(*frame
)))
/* Cursor threaded through the stacktrace_ops callbacks via 'data'. */
struct backtrace_info {
	struct trace_array_cpu	*data;
	struct trace_array	*tr;
	int			pos;	/* entries recorded so far */
};
/* stacktrace_ops callback: warnings are deliberately ignored. */
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}
/* stacktrace_ops callback: warnings are deliberately ignored. */
static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}
/*
 * stacktrace_ops callback invoked at each stack boundary; a negative
 * return tells the unwinder not to descend into other stacks.
 */
static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}
80 static void backtrace_address(void *data
, unsigned long addr
, int reliable
)
82 struct backtrace_info
*info
= data
;
84 if (info
->pos
< sample_max_depth
&& reliable
) {
85 __trace_special(info
->tr
, info
->data
, 1, addr
, 0);
91 const static struct stacktrace_ops backtrace_ops
= {
92 .warning
= backtrace_warning
,
93 .warning_symbol
= backtrace_warning_symbol
,
94 .stack
= backtrace_stack
,
95 .address
= backtrace_address
,
99 trace_kernel(struct pt_regs
*regs
, struct trace_array
*tr
,
100 struct trace_array_cpu
*data
)
102 struct backtrace_info info
;
110 __trace_special(info
.tr
, info
.data
, 1, regs
->ip
, 0);
112 stack
= ((char *)regs
+ sizeof(struct pt_regs
));
113 #ifdef CONFIG_FRAME_POINTER
119 dump_trace(NULL
, regs
, (void *)stack
, bp
, &backtrace_ops
, &info
);
124 static void timer_notify(struct pt_regs
*regs
, int cpu
)
126 struct trace_array_cpu
*data
;
127 struct stack_frame frame
;
128 struct trace_array
*tr
;
129 const void __user
*fp
;
137 data
= tr
->data
[cpu
];
138 is_user
= user_mode(regs
);
140 if (!current
|| current
->pid
== 0)
143 if (is_user
&& current
->state
!= TASK_RUNNING
)
146 __trace_special(tr
, data
, 0, 0, current
->pid
);
149 i
= trace_kernel(regs
, tr
, data
);
154 * Trace user stack if we are not a kernel thread
156 if (current
->mm
&& i
< sample_max_depth
) {
157 regs
= (struct pt_regs
*)current
->thread
.sp0
- 1;
159 fp
= (void __user
*)regs
->bp
;
161 __trace_special(tr
, data
, 2, regs
->ip
, 0);
163 while (i
< sample_max_depth
) {
164 frame
.next_fp
= NULL
;
165 frame
.return_address
= 0;
166 if (!copy_stack_frame(fp
, &frame
))
168 if ((unsigned long)fp
< regs
->sp
)
171 __trace_special(tr
, data
, 2, frame
.return_address
,
181 * Special trace entry if we overflow the max depth:
183 if (i
== sample_max_depth
)
184 __trace_special(tr
, data
, -1, -1, -1);
186 __trace_special(tr
, data
, 3, current
->pid
, i
);
189 static enum hrtimer_restart
stack_trace_timer_fn(struct hrtimer
*hrtimer
)
192 timer_notify(get_irq_regs(), smp_processor_id());
194 hrtimer_forward_now(hrtimer
, ns_to_ktime(sample_period
));
196 return HRTIMER_RESTART
;
199 static void start_stack_timer(void *unused
)
201 struct hrtimer
*hrtimer
= &__get_cpu_var(stack_trace_hrtimer
);
203 hrtimer_init(hrtimer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
204 hrtimer
->function
= stack_trace_timer_fn
;
206 hrtimer_start(hrtimer
, ns_to_ktime(sample_period
), HRTIMER_MODE_REL
);
209 static void start_stack_timers(void)
211 on_each_cpu(start_stack_timer
, NULL
, 1);
214 static void stop_stack_timer(int cpu
)
216 struct hrtimer
*hrtimer
= &per_cpu(stack_trace_hrtimer
, cpu
);
218 hrtimer_cancel(hrtimer
);
221 static void stop_stack_timers(void)
225 for_each_online_cpu(cpu
)
226 stop_stack_timer(cpu
);
229 static void start_stack_trace(struct trace_array
*tr
)
231 mutex_lock(&sample_timer_lock
);
232 tracing_reset_online_cpus(tr
);
233 start_stack_timers();
235 mutex_unlock(&sample_timer_lock
);
238 static void stop_stack_trace(struct trace_array
*tr
)
240 mutex_lock(&sample_timer_lock
);
243 mutex_unlock(&sample_timer_lock
);
246 static int stack_trace_init(struct trace_array
*tr
)
250 start_stack_trace(tr
);
/* Tracer ->reset hook: stop sampling. */
static void stack_trace_reset(struct trace_array *tr)
{
	stop_stack_trace(tr);
}
259 static struct tracer stack_trace __read_mostly
=
262 .init
= stack_trace_init
,
263 .reset
= stack_trace_reset
,
264 #ifdef CONFIG_FTRACE_SELFTEST
265 .selftest
= trace_selftest_startup_sysprof
,
269 __init
static int init_stack_trace(void)
271 return register_tracer(&stack_trace
);
273 device_initcall(init_stack_trace
);
275 #define MAX_LONG_DIGITS 22
278 sysprof_sample_read(struct file
*filp
, char __user
*ubuf
,
279 size_t cnt
, loff_t
*ppos
)
281 char buf
[MAX_LONG_DIGITS
];
284 r
= sprintf(buf
, "%ld\n", nsecs_to_usecs(sample_period
));
286 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
290 sysprof_sample_write(struct file
*filp
, const char __user
*ubuf
,
291 size_t cnt
, loff_t
*ppos
)
293 char buf
[MAX_LONG_DIGITS
];
296 if (cnt
> MAX_LONG_DIGITS
-1)
297 cnt
= MAX_LONG_DIGITS
-1;
299 if (copy_from_user(&buf
, ubuf
, cnt
))
304 val
= simple_strtoul(buf
, NULL
, 10);
306 * Enforce a minimum sample period of 100 usecs:
311 mutex_lock(&sample_timer_lock
);
313 sample_period
= val
* 1000;
314 start_stack_timers();
315 mutex_unlock(&sample_timer_lock
);
320 static struct file_operations sysprof_sample_fops
= {
321 .read
= sysprof_sample_read
,
322 .write
= sysprof_sample_write
,
325 void init_tracer_sysprof_debugfs(struct dentry
*d_tracer
)
327 struct dentry
*entry
;
329 entry
= debugfs_create_file("sysprof_sample_period", 0644,
330 d_tracer
, NULL
, &sysprof_sample_fops
);
333 pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");