 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * lookup. */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);

	return NOTIFY_OK;
}
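
/* Worked detail of the two-sync rule above: a context-switch note in a CPU
 * buffer stores the raw task_struct pointer in s->event (sync_buffer() casts
 * it back), so the struct must outlive every buffer slot that could still
 * reference it.
 */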


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};


static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}


int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}


void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_cookie)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}
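
/* Note for illustration: the cookie handed to userspace is simply the
 * dentry's kernel address once a cookie has been registered for it; the
 * daemon treats it as an opaque handle and resolves it to a pathname later
 * via the lookup_dcookie() system call (see fs/dcookies.c).
 */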


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

out:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}
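
/* A worked example of the offset arithmetic above, assuming PAGE_SHIFT is
 * 12: for a file mapped at vm_start = 0x400000 with vm_pgoff = 1 (i.e. the
 * mapping starts at file offset 0x1000), a sample at addr = 0x401234 gives
 * *offset = (1 << 12) + 0x401234 - 0x400000 = 0x2234, the offset within
 * the file rather than within the mapping.
 */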


static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}
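
/* The sequence emitted above appears in the event buffer as seven words:
 * ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie, ESCAPE_CODE, CTX_TGID_CODE,
 * tgid. ESCAPE_CODE is what lets the reader tell such notes apart from
 * ordinary offset/event sample pairs (see is_code() below).
 */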


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}


static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}
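
/* Thanks to the last_cookie check above, a run of consecutive samples that
 * fall within the same binary shares a single COOKIE_SWITCH_CODE note; only
 * the offset/event pair is emitted per sample.
 */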


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}


static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}


static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}
393 /* "acquire" as many cpu buffer slots as we can */
394 static unsigned long get_slots(struct oprofile_cpu_buffer
*b
)
396 unsigned long head
= b
->head_pos
;
397 unsigned long tail
= b
->tail_pos
;
400 * Subtle. This resets the persistent last_task
401 * and in_kernel values used for switching notes.
402 * BUT, there is a small window between reading
403 * head_pos, and this call, that means samples
404 * can appear at the new head position, but not
405 * be prefixed with the notes for switching
406 * kernel mode or a task switch. This small hole
407 * can lead to mis-attribution or samples where
408 * we don't know if it's in the kernel or not,
409 * at the start of an event buffer.
416 return head
+ (b
->buffer_size
- tail
);
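
/* Example of the wrap-around case above: with buffer_size = 8, tail = 6 and
 * head = 2, the unread samples occupy slots 6, 7, 0 and 1, and this returns
 * 2 + (8 - 6) = 4.
 */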


static void increment_tail(struct oprofile_cpu_buffer *b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}
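
/* Timeline of the two-list scheme: a task freed during sync N lands on
 * dying_tasks; the mortuary pass after sync N splices it onto dead_tasks;
 * the pass after sync N + 1 moves it to local_dead_tasks and frees it, by
 * which point every CPU buffer has been drained at least once since the
 * task pointer could last have been enqueued.
 */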


static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;


/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
	struct mm_struct *mm = NULL;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	unsigned int i;
	sync_buffer_state state = sb_buffer_start;
	unsigned long available;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

	available = get_slots(cpu_buf);

	for (i = 0; i < available; ++i) {
		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];

		if (is_code(s->eip)) {
			if (s->event <= CPU_IS_KERNEL) {
				/* kernel/userspace switch */
				in_kernel = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(s->event);
			} else if (s->event == CPU_TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			} else {
				struct mm_struct *oldmm = mm;

				/* userspace context switch */
				new = (struct task_struct *)s->event;

				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
		} else if (state >= sb_bt_start &&
			   !add_sample(mm, s, in_kernel)) {
			if (state == sb_bt_start) {
				state = sb_bt_ignore;
				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}
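
/* sync_buffer() is called from the task-exit and munmap notifiers above
 * and, in this kernel, periodically from the cpu_buffer.c workqueue;
 * buffer_mutex serializes these callers with the event-buffer reader.
 */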