/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;

static void wq_sync_buffer(struct work_struct *work);

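/* Each CPU buffer is drained into the global event buffer roughly every
 * DEFAULT_TIMER_EXPIRE jiffies; work_enabled keeps the delayed work from
 * re-arming itself once shutdown has started. */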
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i)
                vfree(cpu_buffer[i].buffer);
}

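/* Allocate one sample ring per online CPU, sized by fs_cpu_buffer_size,
 * and reset its bookkeeping before the periodic sync work is installed. */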
int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = fs_cpu_buffer_size;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
                        cpu_to_node(i));
                if (!b->buffer)
                        goto fail;

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->cpu = i;
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

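/* Arm the per-CPU delayed work that periodically flushes each CPU buffer
 * into the global event buffer via wq_sync_buffer(). */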
void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

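/* Stop the periodic flushing; clearing work_enabled prevents
 * wq_sync_buffer() from re-arming itself. */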
void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
        /* reset these to invalid values; the next sample
         * collected will populate the buffer with proper
         * values to initialize the buffer
         */
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        /* the free region wraps around the end of the array */
        return tail + (b->buffer_size - head) - 1;
}

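/* Advance the write position by one slot, wrapping back to the start of
 * the ring when the end of the buffer is reached. */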
static void increment_head(struct oprofile_cpu_buffer * b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we
         * increment is visible */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}

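/* Store one pc/event pair at the current head and advance the head. */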
static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}

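/* Emit a control record: a sample whose pc is ESCAPE_CODE and whose event
 * field carries the code (kernel/user switch, task switch, trace begin). */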
static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the cpu buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        struct task_struct * task;

        cpu_buf->sample_received++;

        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                add_code(cpu_buf, is_kernel);
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_sample(cpu_buf, pc, event);
        return 1;
}

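/* Refuse to start a trace unless at least four slots are free, so the
 * CPU_TRACE_BEGIN code and the sample that follows both fit. */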
static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        if (nr_available_slots(cpu_buf) < 4) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        cpu_buf->tracing = 0;
}

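/* Log one sample on the current CPU and, when backtrace_depth is non-zero,
 * follow it with a backtrace captured through oprofile_ops.backtrace(). */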
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /* if log_sample() fails we can't backtrace since we lost the source
         * of this event */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

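/* Log a sample for which only the pc is known; no register state is
 * available, so no backtrace is attempted. */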
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        log_sample(cpu_buf, pc, is_kernel, event);
}

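/* Append one entry to the backtrace started by oprofile_begin_trace();
 * the trace is abandoned if the buffer fills up or the pc collides with
 * ESCAPE_CODE. */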
void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!cpu_buf->tracing)
                return;

        if (nr_available_slots(cpu_buf) < 1) {
                cpu_buf->tracing = 0;
                cpu_buf->sample_lost_overflow++;
                return;
        }

        /* a broken frame can give an eip with the same value as an escape code,
         * abort the trace if we get it */
        if (pc == ESCAPE_CODE) {
                cpu_buf->tracing = 0;
                cpu_buf->backtrace_aborted++;
                return;
        }

        add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer * b =
                container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}