kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
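
/*
 * Scratch pt_regs used by perf_arch_fetch_caller_regs(), plus two
 * per-cpu sample buffers: one for ordinary contexts and one for NMI
 * context, so an NMI cannot scribble over a buffer that the context
 * it interrupted is still writing. perf_trace_t below gives each
 * buffer PERF_MAX_TRACE_SIZE bytes per cpu.
 */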
DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
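
/*
 * Enable the perf side of an event. The first event enabled
 * system-wide allocates the shared per-cpu buffers and publishes
 * them with rcu_assign_pointer(), so lockless readers in
 * perf_trace_buf_prepare() observe fully initialized memory.
 */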
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0)
		return 0;

	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		total_ref_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;

	return ret;
}
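
/*
 * Find the event matching event_id under event_mutex and enable it.
 * The module reference pins the module that defines the event for as
 * long as perf uses it; it is dropped in perf_trace_disable().
 */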
int perf_trace_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
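
/*
 * Disable the perf side of an event. When the last event goes away,
 * the buffer pointers are unpublished and synchronize_sched() waits
 * for all in-flight users (which run with interrupts, and hence
 * preemption, disabled) before the buffers are freed.
 */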
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	if (!--total_ref_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event in profiling has finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
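
/*
 * Counterpart of perf_trace_enable(): disable the event matching
 * event_id and drop the module reference taken at enable time.
 */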
void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
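
/*
 * Reserve and initialize a sample slot in the current CPU's buffer.
 * Interrupts are disabled for the duration and a swevent recursion
 * context is taken, so the slot cannot be clobbered by a recursing
 * event on the same CPU. Returns NULL (with irqs restored) when the
 * buffers are gone or recursion is detected. On success the caller
 * must release the recursion context and restore *irq_flags once the
 * record has been submitted.
 */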
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from alignment so we don't leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
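
/*
 * Usage sketch (illustrative, mirroring the probe code generated by
 * include/trace/ftrace.h; error handling trimmed):
 *
 *	entry = perf_trace_buf_prepare(size, event_id, &rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields after the header ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      irq_flags, regs);
 *
 * perf_trace_buf_submit() (a helper in trace.h) hands the record to
 * perf_tp_event(), then releases the recursion context and restores
 * interrupts, undoing what perf_trace_buf_prepare() set up.
 */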