/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
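
/*
 * Enable profiling for one event: on the first enabled event in the
 * system, allocate the shared per-cpu buffers (one for normal context,
 * one for NMI context), then call the event's own profile_enable()
 * callback.
 */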
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}
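
/*
 * Look up the event by id under event_mutex, pin its module and enable
 * profiling for it.
 */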
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
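
/*
 * Disable profiling for one event.  When the last profiled event goes
 * away, unpublish the per-cpu buffers, wait for in-flight users to
 * finish and free the buffers.
 */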
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event being profiled has finished before
		 * releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
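
/*
 * Counterpart of ftrace_profile_enable(): find the event by id, disable
 * profiling for it and drop the module reference taken at enable time.
 */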
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
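
/*
 * Reserve space for one trace entry in the per-cpu profiling buffer.
 * Interrupts are disabled and a swevent recursion context is taken;
 * *irq_flags and *rctxp are handed back so the caller can release them
 * once the entry has been submitted.  Returns NULL on error.
 */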
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);