Merge branch 'mini2440-dev-unlikely' into mini2440-dev
[linux-2.6/mini2440.git] kernel/trace/trace_event_profile.c
blob 8d5c171cc9987d924f9fcfd3328fcb966288b9a0
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/module.h>
#include "trace.h"
/*
 * alloc_percpu() takes a type rather than a size, so create a dummy
 * type that matches the desired buffer size.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
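
/*
 * Shared output buffers for all profiled events: one for normal and IRQ
 * context, and a separate one for NMI context, since an NMI can arrive
 * while an event is already writing into the first buffer.
 */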
char *trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

char *trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
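
/*
 * Take a profiling reference on @event. The first enabler across all
 * events also allocates the shared per-cpu buffers (normal and NMI)
 * before calling the event's profile_enable() callback; on failure the
 * buffers and the reference are rolled back.
 */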
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (atomic_inc_return(&event->profile_count))
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(trace_profile_buf_nmi, buf);
	}

	ret = event->profile_enable();
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(trace_profile_buf_nmi);
		free_percpu(trace_profile_buf);
		trace_profile_buf_nmi = NULL;
		trace_profile_buf = NULL;
	}
fail_buf:
	atomic_dec(&event->profile_count);

	return ret;
}
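
/*
 * Enable profiling for the event with id @event_id. The owning module
 * is pinned with try_module_get() so it cannot be unloaded while
 * profiling is active; ftrace_profile_disable() drops that reference.
 */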
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
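
/*
 * Drop one profiling reference on @event. The last disabler across all
 * events unpublishes the shared buffers and frees them once every
 * in-flight user has finished.
 */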
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable();

	if (!--total_profile_count) {
		buf = trace_profile_buf;
		rcu_assign_pointer(trace_profile_buf, NULL);

		nmi_buf = trace_profile_buf_nmi;
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * Ensure every event in profiling has finished before
		 * releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
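
/*
 * Counterpart of ftrace_profile_enable(): disable profiling for the
 * event with id @event_id and release the module reference taken when
 * it was enabled.
 */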
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
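
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * such as the perf tracepoint glue is expected to pair the two entry
 * points, with event_id taken from the tracepoint's id:
 *
 *	if (!ftrace_profile_enable(event_id)) {
 *		... record samples via trace_profile_buf{,_nmi} ...
 *		ftrace_profile_disable(event_id);
 *	}
 */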