/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);
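
/*
 * Each CPU buffer is drained into the global event buffer by a delayed
 * workqueue item (wq_sync_buffer) that reschedules itself every
 * DEFAULT_TIMER_EXPIRE jiffies, i.e. HZ / 10 or roughly every 100 ms.
 */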
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}
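
/*
 * Approximate per-event header overhead added by the ring buffer, in
 * bytes; used below only to convert the entry count into a byte size
 * for ring_buffer_alloc().
 */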
#define RB_EVENT_HDR_SIZE  4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);
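	/*
	 * Illustrative sizing (assuming a 64-bit kernel where struct
	 * op_sample is two unsigned longs, 16 bytes): a buffer_size of,
	 * say, 8192 entries reserves about 8192 * (16 + 4) = 160 KiB
	 * per CPU.
	 */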

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
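		/* -1 forces a kernel/user state record for this CPU's first sample */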
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per CPU so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. The passed struct op_entry does not need to be initialized.
 * The function reserves a data array of the given size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * a NULL pointer is returned, otherwise a pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
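
/*
 * Illustrative sketch (not part of the original file): the typical
 * reserve/fill/commit pattern, mirroring what op_add_code() below does
 * for an escape record with one data word; 'flags' and 'value' are
 * placeholders for the example:
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 1);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = ESCAPE_CODE;
 *	sample->event = flags;
 *	op_cpu_buffer_add_data(&entry, value);
 *	op_cpu_buffer_write_commit(&entry);
 */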

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;
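
	/* a task switch record carries the new task pointer as one data word */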
	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
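
/*
 * For reference (summarizing the code above, not part of the original
 * file): a context-switch record emitted by op_add_code() looks like
 *
 *	eip     = ESCAPE_CODE
 *	event   = TRACE_BEGIN / KERNEL_CTX_SWITCH / IS_KERNEL /
 *		  USER_CTX_SWITCH flags
 *	data[0] = task pointer, present only on a USER_CTX_SWITCH
 */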

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;
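
	/* two extra data words are reserved for 'code' and 'pc' below */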
	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * return 0 to indicate the buffer is too small,
		 * even if there is some space left
		 */
		return 0;
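	/* store the 64-bit value as two 32-bit data words, low half first */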
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
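
/*
 * Illustrative sketch (not part of the original file): how a caller
 * such as an architecture driver might emit a sample with extra data
 * using the reserve/add/commit interface above. The 'code' value and
 * 'extra_value' payload are placeholders for the example.
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, code, 1);
 *	oprofile_add_data(&entry, extra_value);
 *	oprofile_write_commit(&entry);
 *
 * If the reservation fails, entry.event is NULL and the add/commit
 * helpers above turn into no-ops (returning 0 and -EINVAL).
 */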

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct CPU.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}