[linux-2.6.git] kernel/trace/trace_event_profile.c
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
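
/*
 * Per-cpu scratch buffers in which a trace entry is built before being
 * handed to perf. A separate buffer is kept for NMI context so that an
 * NMI interrupting a trace event in progress cannot clobber the buffer
 * that event is writing to. Each is allocated as a per-cpu perf_trace_t,
 * i.e. FTRACE_MAX_PROFILE_SIZE bytes per cpu.
 */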
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
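
/*
 * Enable profiling for @event. The scratch buffers are shared by all
 * events and allocated lazily when the first one is enabled:
 * event->profile_count counts instances of this event in use,
 * total_profile_count counts event ids in use.
 */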
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	/* Undo the allocations unless another event id already uses them */
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}
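
/*
 * Look up the event matching @event_id under event_mutex and enable
 * profiling on it. A reference on the event's owning module is taken so
 * it cannot be unloaded while being profiled; ftrace_profile_disable()
 * drops it again.
 */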
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
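
/*
 * Disable profiling for @event, releasing the shared buffers once the
 * last profiled event id goes away.
 */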
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event currently in profiling has finished
		 * before releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
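
/*
 * Counterpart of ftrace_profile_enable(): disable profiling on the
 * event and drop the module reference taken at enable time.
 */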
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
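
/*
 * Reserve @size bytes in the current cpu's buffer and fill in the
 * generic trace entry header. Returns with irqs disabled and a swevent
 * recursion context held; both are released when the caller submits the
 * entry (via ftrace_perf_buf_submit()). @size is expected to be u64
 * aligned already: only the last word is zeroed to avoid leaking stack
 * padding to userspace.
 */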
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* Zero the dead bytes from the alignment to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;

err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
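
/*
 * Typical caller pattern, sketched from the perf probe template in
 * include/trace/ftrace.h (identifiers abbreviated, error handling and
 * field assignment elided):
 *
 *	__entry_size = ALIGN(sizeof(*entry) + __data_size, sizeof(u64));
 *
 *	entry = ftrace_perf_buf_prepare(__entry_size, event_call->id,
 *					&rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *
 *	// ... fill in the event-specific fields of *entry ...
 *
 *	ftrace_perf_buf_submit(entry, __entry_size, rctx, addr, count,
 *			       irq_flags);
 */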