/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;
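
/*
 * Two per-cpu scratch buffers are kept: perf_trace_buf for normal contexts
 * and perf_trace_buf_nmi for NMI context, so that an event hit from NMI
 * does not clobber a record being built on the same CPU.  Both are
 * allocated lazily when the first event gets enabled and are published
 * with rcu_assign_pointer() so readers never see half-initialised memory.
 */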

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

static int perf_trace_event_enable(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->perf_refcount++ > 0)
                return 0;

        if (!total_ref_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->perf_event_enable(event);
        if (!ret) {
                total_ref_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_ref_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->perf_refcount--;

        return ret;
}
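
/*
 * Look up the event by id and enable perf profiling on it.  The walk over
 * ftrace_events is protected by event_mutex, and a reference is taken on
 * the module owning the event so it cannot be unloaded while profiled.
 */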

int perf_trace_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->perf_event_enable &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
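
/*
 * Drop one profiling reference on the event.  When the very last reference
 * across all events goes away, the per-cpu buffers are unpublished (their
 * pointers set to NULL under RCU), synchronize_sched() waits for any
 * handler still writing into them, and only then is the memory freed.
 */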

static void perf_trace_event_disable(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->perf_refcount > 0)
                return;

        event->perf_event_disable(event);

        if (!--total_ref_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event being profiled has finished before
                 * releasing the buffers.
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
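
/*
 * Counterpart of perf_trace_enable(): find the event by id, drop the
 * profiling reference and release the module pin taken at enable time.
 */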

void perf_trace_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
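
/*
 * Reserve per-cpu scratch space for one trace record of @size bytes.
 * Interrupts are disabled (saved flags returned through @irq_flags) and a
 * software event recursion context is taken (returned through @rctxp);
 * both are released here on failure, otherwise the caller must release
 * them once the record has been submitted.  Returns the raw buffer with
 * the struct trace_entry header already initialised, or NULL on error.
 */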

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       int *rctxp, unsigned long *irq_flags)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc, cpu;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        pc = preempt_count();

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(*irq_flags);

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference_sched(perf_trace_buf);

        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
        entry->type = type;

        return raw_data;
err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        local_irq_restore(*irq_flags);

        return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
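
/*
 * Rough sketch of the intended call sequence from an event's perf handler;
 * perf_trace_buf_submit() is assumed here as the counterpart that reports
 * the record to perf and undoes the recursion context and irq state taken
 * by perf_trace_buf_prepare() (the exact submit signature may differ):
 *
 *      entry = perf_trace_buf_prepare(size, event_call->id, &rctx, &flags);
 *      if (!entry)
 *              return;
 *      ... fill in the event payload behind the trace_entry header ...
 *      perf_trace_buf_submit(entry, size, rctx, addr, count, flags, regs);
 */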