/**
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
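/*
 * Illustrative data flow (a sketch, not part of the original header):
 *
 *   NMI/interrupt context                  process context
 *   oprofile_add_sample() --> per-CPU buffer --> sync_buffer() --> event buffer
 */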
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"
#define OP_BUFFER_FLAGS	0
/*
 * Read and write access uses spin locking. Thus, writing to the
 * buffer by the NMI handler (x86) could also occur during critical
 * sections when reading the buffer. To avoid this, there are 2
 * buffers for independent read and write access. Read access is in
 * process context only, write access only in the NMI handler. If the
 * read buffer runs empty, both buffers are swapped atomically. There
 * is potentially a small window during swapping where the buffers are
 * disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
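/*
 * A minimal sketch of the swap scheme described above, as seen from
 * the read side (the real logic lives in op_cpu_buffer_read_entry()
 * below):
 *
 *	e = ring_buffer_consume(read_buffer, cpu, NULL);
 *	if (!e && !ring_buffer_swap_cpu(read_buffer, write_buffer, cpu))
 *		e = ring_buffer_consume(read_buffer, cpu, NULL);
 */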
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}
void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}
#define RB_EVENT_HDR_SIZE  4
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}
void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}
/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. In case of
 * errors a null pointer is returned, otherwise the pointer to the
 * sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
}
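/*
 * Illustrative writer-side usage of the reserve/commit pair (a
 * sketch; pc, event and data stand for caller-supplied values, cf.
 * op_add_code() and op_add_sample() below). One data slot is
 * reserved and then filled with op_cpu_buffer_add_data():
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 1);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = pc;
 *	sample->event = event;
 *	op_cpu_buffer_add_data(&entry, data);
 *	op_cpu_buffer_write_commit(&entry);
 */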
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}
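/*
 * Illustrative consumer loop (a sketch of how the sync code might
 * drain one CPU's entries; process_sample() is a made-up helper):
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	while ((sample = op_cpu_buffer_read_entry(&entry, cpu)))
 *		process_sample(sample->eip, sample->event,
 *			       entry.data, entry.size);
 */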
unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}
/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}
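/*
 * Resulting buffer framing (illustrative): a context change is logged
 * as an ESCAPE_CODE sample whose event field carries the flags,
 * immediately followed by the ordinary sample, e.g.
 *
 *	{ .eip = ESCAPE_CODE, .event = KERNEL_CTX_SWITCH | IS_KERNEL }
 *	{ .eip = pc,          .event = event }
 */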
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a too small
		 * buffer, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}
int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
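/*
 * Illustrative use of the reserve/add/commit API from an architecture
 * driver (a sketch; MY_SAMPLE_CODE, ctl and wide_counter are made up).
 * Reserving size 3 leaves room for one unsigned long plus one u64,
 * which oprofile_add_data64() stores as two 32-bit words:
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, MY_SAMPLE_CODE, 3);
 *	oprofile_add_data(&entry, ctl);
 *	oprofile_add_data64(&entry, wide_counter);
 *	oprofile_write_commit(&entry);
 */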
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}