/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"	/* oprofile_ops, oprofile_cpu_buffer_size, oprofile_backtrace_depth */

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access is using spin locking. Thus, writing to the
 * buffer by NMI handler (x86) could occur also during critical
 * sections when reading the buffer. To avoid this, there are 2
 * buffers for independent read and write access. Read access is in
 * process context only, write access only in the NMI handler. If the
 * read buffer runs empty, both buffers are swapped atomically. There
 * is potentially a small window during swapping where the buffers are
 * disabled and samples could be lost.
 *
 * Using 2 buffers is a little bit overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
        return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
        if (op_ring_buffer_read)
                ring_buffer_free(op_ring_buffer_read);
        op_ring_buffer_read = NULL;
        if (op_ring_buffer_write)
                ring_buffer_free(op_ring_buffer_write);
        op_ring_buffer_write = NULL;
}
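
/*
 * Allocate the read and write ring buffers and reset the per-cpu
 * bookkeeping and statistics. On failure everything allocated so far
 * is freed again and -ENOMEM is returned.
 */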
int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = oprofile_cpu_buffer_size;

        op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_read)
                goto fail;
        op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_write)
                goto fail;

        for_each_possible_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->sample_invalid_eip = 0;
                b->cpu = i;             /* wq_sync_buffer() relies on this */
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}
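
/*
 * start_cpu_work()/end_cpu_work() arm and disarm the periodic per-cpu
 * sync work. wq_sync_buffer() re-queues itself while work_enabled is
 * set, so only the initial instances need to be scheduled here.
 */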
void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at the same time.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}
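
/*
 * Reserve room for one struct op_sample in the write buffer. On success
 * entry->sample points at the reserved slot and the caller must finish
 * the write with op_cpu_buffer_write_commit().
 */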
int op_cpu_buffer_write_entry(struct op_entry *entry)
{
        entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
                                                sizeof(struct op_sample),
                                                &entry->irq_flags);
        if (entry->event)
                entry->sample = ring_buffer_event_data(entry->event);
        else
                entry->sample = NULL;

        if (!entry->sample)
                return -ENOMEM;

        return 0;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
        return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
                                         entry->irq_flags);
}
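
/*
 * Fetch the next sample for this cpu from the read buffer. When the
 * read buffer runs empty the read and write buffers are swapped (see
 * the buffer-swap comment near the top of this file) and the read is
 * retried.
 */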
struct op_sample *op_cpu_buffer_read_entry(int cpu)
{
        struct ring_buffer_event *e;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                return ring_buffer_event_data(e);
        if (ring_buffer_swap_cpu(op_ring_buffer_read,
                                 op_ring_buffer_write,
                                 cpu))
                return NULL;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                return ring_buffer_event_data(e);
        return NULL;
}
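
/*
 * Number of samples currently pending for this cpu across both the
 * read and the write buffer.
 */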
unsigned long op_cpu_buffer_entries(int cpu)
{
        return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
                + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}
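
/*
 * add_sample() logs one pc/event pair into the write buffer. add_code()
 * logs a control record: a sample whose eip is ESCAPE_CODE and whose
 * event field carries the value (kernel/user switch, task pointer,
 * CPU_TRACE_BEGIN, ...).
 */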
static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_entry entry;
        int ret;

        ret = op_cpu_buffer_write_entry(&entry);
        if (ret)
                return ret;

        entry.sample->eip = pc;
        entry.sample->event = event;

        return op_cpu_buffer_write_commit(&entry);
}

static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
        return add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the cpu buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        struct task_struct *task;

        cpu_buf->sample_received++;

        if (pc == ESCAPE_CODE) {
                cpu_buf->sample_invalid_eip++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                if (add_code(cpu_buf, is_kernel))
                        goto fail;
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                if (add_code(cpu_buf, (unsigned long)task))
                        goto fail;
        }

        if (add_sample(cpu_buf, pc, event))
                goto fail;

        return 1;

fail:
        cpu_buf->sample_lost_overflow++;
        return 0;
}
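
/*
 * oprofile_begin_trace()/oprofile_end_trace() bracket a backtrace: the
 * CPU_TRACE_BEGIN escape code is logged and cpu_buf->tracing is set so
 * that oprofile_add_trace() accepts the call-chain entries that follow.
 */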
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}
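
/*
 * Log one sample and, if a backtrace depth is configured, ask the
 * architecture backtrace handler to log the call chain behind it.
 */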
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!oprofile_backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        oprofile_begin_trace(cpu_buf);

        /*
         * if log_sample() fails we can't backtrace since we lost the
         * source of this event
         */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, oprofile_backtrace_depth);

        oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
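
/*
 * Usage note: oprofile_add_sample() is what architecture code calls from
 * its performance-counter overflow handler (NMI or regular interrupt).
 * A minimal sketch of such a caller follows; the handler name and the
 * counter helpers (nr_counters, counter_overflowed(), reset_counter())
 * are hypothetical placeholders, only the oprofile_add_sample() call is
 * the API defined above:
 *
 *      static int hypothetical_check_ctrs(struct pt_regs * const regs)
 *      {
 *              int i;
 *
 *              for (i = 0; i < nr_counters; ++i) {
 *                      if (!counter_overflowed(i))
 *                              continue;
 *                      reset_counter(i);
 *                      oprofile_add_sample(regs, i);   (event = counter index)
 *              }
 *              return 1;
 *      }
 */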

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
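
/*
 * An IBS sample carries a whole register set rather than a single
 * pc/event pair. It is logged as an escape code holding ibs_code,
 * followed by three (fetch) or six (op) pc/event pairs taken from
 * consecutive words of ibs_sample[].
 */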
void oprofile_add_ibs_sample(struct pt_regs * const regs,
                             unsigned int * const ibs_sample, int ibs_code)
{
        int is_kernel = !user_mode(regs);
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        struct task_struct *task;
        int fail = 0;

        cpu_buf->sample_received++;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                if (add_code(cpu_buf, is_kernel))
                        goto fail;
                cpu_buf->last_is_kernel = is_kernel;
        }

        /* notice a task switch */
        task = current;
        if (cpu_buf->last_task != task) {
                if (add_code(cpu_buf, (unsigned long)task))
                        goto fail;
                cpu_buf->last_task = task;
        }

        fail = fail || add_code(cpu_buf, ibs_code);
        fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
        fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
        fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

        if (ibs_code == IBS_OP_BEGIN) {
                fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
                fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
                fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
        }

        if (fail)
                goto fail;

        if (oprofile_backtrace_depth)
                oprofile_ops.backtrace(regs, oprofile_backtrace_depth);

        return;

fail:
        cpu_buf->sample_lost_overflow++;
        return;
}

#endif
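
/*
 * oprofile_add_pc() is for callers that already know pc and is_kernel
 * and have no struct pt_regs to hand in.
 */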
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        log_sample(cpu_buf, pc, is_kernel, event);
}
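
/*
 * Called by the architecture backtrace handler for each frame it walks
 * while a trace opened by oprofile_begin_trace() is active.
 */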
void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!cpu_buf->tracing)
                return;

        /*
         * broken frame can give an eip with the same value as an
         * escape code, abort the trace if we get it
         */
        if (pc == ESCAPE_CODE)
                goto fail;

        if (add_sample(cpu_buf, pc, 0))
                goto fail;

        return;
fail:
        cpu_buf->tracing = 0;
        cpu_buf->backtrace_aborted++;
        return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer *b =
                container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);

                if (!cpu_online(b->cpu)) {
                        cancel_delayed_work(&b->work);
                        return;
                }
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}