/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
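/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo hw-branch-tracer > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 */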
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/ds.h>

#include "trace.h"
#include "trace_output.h"
#define BTS_BUFFER_SIZE (1 << 13)

static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);

#define this_tracer per_cpu(tracer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;
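/*
 * Request a BTS buffer for @cpu from the Debug Store layer. On error,
 * the per-cpu tracer pointer is cleared so that cpu is simply skipped
 * by the rest of the tracer.
 */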
static void bts_trace_init_cpu(int cpu)
{
	per_cpu(tracer, cpu) =
		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
				   NULL, (size_t)-1, BTS_KERNEL);

	if (IS_ERR(per_cpu(tracer, cpu)))
		per_cpu(tracer, cpu) = NULL;
}
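/*
 * Tracer init callback: try to enable branch tracing on every online
 * cpu; tracing counts as enabled if at least one cpu succeeded.
 */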
static int bts_trace_init(struct trace_array *tr)
{
	int cpu;

	hw_branch_trace = tr;
	trace_hw_branches_enabled = 0;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bts_trace_init_cpu(cpu);

		if (likely(per_cpu(tracer, cpu)))
			trace_hw_branches_enabled = 1;
	}
	trace_hw_branches_suspended = 0;
	put_online_cpus();

	/* If we could not enable tracing on a single cpu, we fail. */
	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
}
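/* Tracer reset callback: release the BTS tracers of all online cpus. */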
static void bts_trace_reset(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (likely(per_cpu(tracer, cpu))) {
			ds_release_bts(per_cpu(tracer, cpu));
			per_cpu(tracer, cpu) = NULL;
		}
	}
	trace_hw_branches_enabled = 0;
	trace_hw_branches_suspended = 0;
	put_online_cpus();
}
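/* Tracer start callback: resume BTS tracing on all online cpus. */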
static void bts_trace_start(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_resume_bts(per_cpu(tracer, cpu));
	trace_hw_branches_suspended = 0;
	put_online_cpus();
}
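/* Tracer stop callback: suspend BTS tracing on all online cpus. */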
static void bts_trace_stop(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_suspend_bts(per_cpu(tracer, cpu));
	trace_hw_branches_suspended = 1;
	put_online_cpus();
}
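/*
 * CPU hotplug notifier: set up a tracer for cpus coming online (or
 * failing to go down) and release the tracer of cpus about to go down,
 * so no BTS buffer is left dangling on a dead cpu.
 */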
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		/* The notification is sent with interrupts enabled. */
		if (trace_hw_branches_enabled) {
			bts_trace_init_cpu(cpu);

			if (trace_hw_branches_suspended &&
			    likely(per_cpu(tracer, cpu)))
				ds_suspend_bts(per_cpu(tracer, cpu));
		}
		break;

	case CPU_DOWN_PREPARE:
		/* The notification is sent with interrupts enabled. */
		if (likely(per_cpu(tracer, cpu))) {
			ds_release_bts(per_cpu(tracer, cpu));
			per_cpu(tracer, cpu) = NULL;
		}
	}

	return NOTIFY_DONE;
}
static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
	.notifier_call = bts_hotcpu_handler
};
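/* Output formatting: a header line, then one "TO  <-  FROM" line per branch. */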
static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m, "# CPU#        TO  <-  FROM\n");
}
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *seq = &iter->seq;
	struct hw_branch_entry *it;

	trace_assign_type(it, entry);

	if (entry->type == TRACE_HW_BRANCHES) {
		if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
		    seq_print_ip_sym(seq, it->to, symflags) &&
		    trace_seq_printf(seq, "\t  <-  ") &&
		    seq_print_ip_sym(seq, it->from, symflags) &&
		    trace_seq_printf(seq, "\n"))
			return TRACE_TYPE_HANDLED;
		return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_UNHANDLED;
}
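/*
 * Write one branch record into the ftrace ring buffer. The per-cpu
 * disabled counter doubles as a recursion guard: if we are not the
 * first writer on this cpu, bail out without touching the buffer.
 */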
void trace_hw_branch(u64 from, u64 to)
{
	struct ftrace_event_call *call = &event_hw_branch;
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct ring_buffer *buf;
	struct hw_branch_entry *entry;
	unsigned long irq1;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	buf = tr->buffer;
	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->from = from;
	entry->to = to;
	if (!filter_check_discard(call, entry, buf, event))
		trace_buffer_unlock_commit(buf, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}
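/*
 * Decode a single raw BTS record via the DS read hook and forward
 * branch records to trace_hw_branch(); other qualifiers are ignored.
 */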
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
	struct bts_struct bts;
	int err = 0;

	WARN_ON_ONCE(!trace->read);
	if (!trace->read)
		return;

	err = trace->read(this_tracer, at, &bts);
	if (err < 0)
		return;

	switch (bts.qualifier) {
	case BTS_BRANCH:
		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
		break;
	}
}
/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: tracing must be suspended on the current cpu
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *)arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (unlikely(!tr))
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	if (unlikely(!this_tracer))
		return;

	trace = ds_read_bts(this_tracer);
	if (!trace)
		return;

	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(trace, at);
}
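/*
 * Open callback: suspend branch tracing on all cpus, drain each cpu's
 * BTS buffer into the ftrace buffer, then resume, satisfying the
 * precondition of trace_bts_cpu() above.
 */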
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_suspend_bts(per_cpu(tracer, cpu));
	/*
	 * We need to collect the trace on the respective cpu since ftrace
	 * implicitly adds the record for the current cpu.
	 * Once that is more flexible, we could collect the data from any cpu.
	 */
	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_resume_bts(per_cpu(tracer, cpu));
	put_online_cpus();
}
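/* Close callback: drop the data collected for this trace read. */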
static void trace_bts_close(struct trace_iterator *iter)
{
	tracing_reset_online_cpus(iter->tr);
}
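/*
 * Dump the current cpu's branch trace on an oops; the noirq variants
 * are used since interrupts may already be disabled in that context.
 */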
void trace_hw_branch_oops(void)
{
	if (this_tracer) {
		ds_suspend_bts_noirq(this_tracer);
		trace_bts_cpu(hw_branch_trace);
		ds_resume_bts_noirq(this_tracer);
	}
}
struct tracer bts_tracer __read_mostly =
{
	.name		= "hw-branch-tracer",
	.init		= bts_trace_init,
	.reset		= bts_trace_reset,
	.print_header	= bts_trace_print_header,
	.print_line	= bts_trace_print_line,
	.start		= bts_trace_start,
	.stop		= bts_trace_stop,
	.open		= trace_bts_prepare,
	.close		= trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};
__init static int init_bts_trace(void)
{
	register_hotcpu_notifier(&bts_hotcpu_notifier);
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);