/*
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"
static struct trace_array	*sysprof_trace;
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
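
/*
 * One saved user-space stack frame, in the layout produced on x86 when
 * building with frame pointers: the previous frame pointer followed by
 * the return address.
 */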
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
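
/*
 * Called from the sampling timer on each CPU.  Records the current pid
 * and user-space instruction pointer, then walks the user stack by
 * following the frame-pointer chain, emitting one trace entry per
 * return address up to sample_max_depth frames.
 */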
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;
	}

	trace_special(tr, data, 0, current->pid, regs->ip);

	fp = (void __user *)regs->bp;

	for (i = 0; i < sample_max_depth; i++) {
		frame.next_fp = NULL;
		frame.return_address = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;

		trace_special(tr, data, 1, frame.return_address,
			      (unsigned long)fp);
		fp = frame.next_fp;
	}

	trace_special(tr, data, 2, current->pid, i);

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		trace_special(tr, data, -1, -1, -1);
}
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}
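
/*
 * Arm one sampling hrtimer per online CPU.  start_stack_timers() binds
 * the current task to each CPU in turn so that every timer is started
 * on the CPU it is going to profile.
 */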
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}
static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}
static void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
static void start_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
}

static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}
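
/*
 * Tracer entry points, hooked into the ftrace core through the struct
 * tracer below: init/reset run when the tracer is selected or dropped,
 * ctrl_update whenever tracing is switched on or off (tr->ctrl).
 */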
static void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);
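
/*
 * debugfs control file for the sample period.  Reads return the current
 * period in microseconds; writes take a new period in microseconds and
 * store it in nanoseconds for the hrtimers.  For example (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 500 > /sys/kernel/debug/tracing/sysprof_sample_period
 */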
#define MAX_LONG_DIGITS 22

static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	int r;

	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
sysprof_sample_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	unsigned long val;

	if (cnt > MAX_LONG_DIGITS-1)
		cnt = MAX_LONG_DIGITS-1;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	val = simple_strtoul(buf, NULL, 10);
	/*
	 * Enforce a minimum sample period of 100 usecs:
	 */
	if (val < 100)
		return -EINVAL;

	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	sample_period = val * 1000;
	start_stack_timers();
	mutex_unlock(&sample_timer_lock);

	return cnt;
}
static struct file_operations sysprof_sample_fops = {
	.read		= sysprof_sample_read,
	.write		= sysprof_sample_write,
};
void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("sysprof_sample_period", 0644,
			d_tracer, NULL, &sysprof_sample_fops);
	if (entry)
		return;

	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}