kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
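
/*
 * One scratch buffer per recursion context: the index returned by
 * perf_swevent_get_recursion_context() selects among the four
 * (process, softirq, hardirq, NMI), so nested events cannot clobber
 * each other's data.
 */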
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
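
/*
 * Resolve p_event->attr.config to a registered ftrace event, take a
 * reference on the module that provides it, then set the event up for
 * perf via perf_trace_event_init().
 */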
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
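
/*
 * Add the event to this CPU's hlist so the tracepoint callback will
 * find it and deliver samples to it.
 */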
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}
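
/*
 * RCU removal pairs with the hlist_add_head_rcu() above: readers
 * traversing the per-cpu list under rcu_read_lock() stay safe.
 */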
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}
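
/*
 * Drop the per-event-id reference taken in perf_trace_event_init();
 * the last user unregisters the tracepoint and frees the per-cpu
 * hlists and scratch buffers.
 */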
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	mutex_unlock(&event_mutex);
}
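
/*
 * Reserve space in the per-cpu scratch buffer for the current
 * recursion context and write the common trace entry header into it.
 * Returns the buffer, with *rctxp set to the recursion context that
 * must be handed back on submit, or NULL if that context is already
 * in use.
 */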
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
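
/*
 * Typical caller pattern, as a sketch only: the real callers are
 * generated by the TRACE_EVENT() machinery, and the variable names
 * (size, addr, count, head) and the exact perf_trace_buf_submit()
 * argument list below are illustrative, not copied from a caller:
 *
 *	int rctx;
 *	struct trace_entry *entry;
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields after the header ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */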