/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/ds.h>

#include "trace.h"
#include "trace_output.h"

#define BTS_BUFFER_SIZE (1 << 13)

static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);

#define this_tracer per_cpu(hwb_tracer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;

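/*
 * Request a BTS tracer for the given cpu, directing the hardware's
 * branch records into that cpu's hwb_buffer. On failure, the per-cpu
 * tracer pointer is reset to NULL so callers only need a NULL check.
 */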
static void bts_trace_init_cpu(int cpu)
{
	per_cpu(hwb_tracer, cpu) =
		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
				   BTS_KERNEL);

	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
		per_cpu(hwb_tracer, cpu) = NULL;
}

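/*
 * Tracer ->init callback: request a BTS tracer on every online cpu.
 * Tracing counts as enabled as soon as a single cpu could be set up.
 */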
static int bts_trace_init(struct trace_array *tr)
{
	int cpu;

	hw_branch_trace = tr;
	trace_hw_branches_enabled = 0;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bts_trace_init_cpu(cpu);

		if (likely(per_cpu(hwb_tracer, cpu)))
			trace_hw_branches_enabled = 1;
	}
	trace_hw_branches_suspended = 0;
	put_online_cpus();

	/* If we could not enable tracing on a single cpu, we fail. */
	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
}

static void bts_trace_reset(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (likely(per_cpu(hwb_tracer, cpu))) {
			ds_release_bts(per_cpu(hwb_tracer, cpu));
			per_cpu(hwb_tracer, cpu) = NULL;
		}
	}
	trace_hw_branches_enabled = 0;
	trace_hw_branches_suspended = 0;
	put_online_cpus();
}

static void bts_trace_start(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_resume_bts(per_cpu(hwb_tracer, cpu));
	trace_hw_branches_suspended = 0;
	put_online_cpus();
}

static void bts_trace_stop(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
	trace_hw_branches_suspended = 1;
	put_online_cpus();
}

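/*
 * CPU hotplug notifier: cpus coming online while tracing is enabled
 * get a BTS tracer of their own (suspended if tracing is currently
 * suspended); cpus going down release theirs.
 */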
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		/* The notification is sent with interrupts enabled. */
		if (trace_hw_branches_enabled) {
			bts_trace_init_cpu(cpu);

			if (trace_hw_branches_suspended &&
			    likely(per_cpu(hwb_tracer, cpu)))
				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
		}
		break;

	case CPU_DOWN_PREPARE:
		/* The notification is sent with interrupts enabled. */
		if (likely(per_cpu(hwb_tracer, cpu))) {
			ds_release_bts(per_cpu(hwb_tracer, cpu));
			per_cpu(hwb_tracer, cpu) = NULL;
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
	.notifier_call = bts_hotcpu_handler
};

static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m, "# CPU#        TO  <-  FROM\n");
}

static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *seq = &iter->seq;
	struct hw_branch_entry *it;

	trace_assign_type(it, entry);

	if (entry->type == TRACE_HW_BRANCHES) {
		if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
		    seq_print_ip_sym(seq, it->to, symflags) &&
		    trace_seq_printf(seq, "\t  <-  ") &&
		    seq_print_ip_sym(seq, it->from, symflags) &&
		    trace_seq_printf(seq, "\n"))
			return TRACE_TYPE_HANDLED;
		return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_UNHANDLED;
}

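/*
 * Write a single branch record into the ftrace ring buffer. Runs with
 * interrupts disabled; the per-cpu disabled counter keeps the tracer
 * from recursing into itself on this cpu.
 */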
void trace_hw_branch(u64 from, u64 to)
{
	struct ftrace_event_call *call = &event_hw_branch;
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct ring_buffer *buf;
	struct hw_branch_entry *entry;
	unsigned long irq1;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	buf = tr->buffer;
	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->from = from;
	entry->to   = to;
	if (!filter_check_discard(call, entry, buf, event))
		trace_buffer_unlock_commit(buf, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}

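/*
 * Decode one raw BTS record via the DS read helper and forward branch
 * records to trace_hw_branch(); other record types are ignored.
 */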
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
	struct bts_struct bts;
	int err = 0;

	WARN_ON_ONCE(!trace->read);
	if (!trace->read)
		return;

	err = trace->read(this_tracer, at, &bts);
	if (err < 0)
		return;

	switch (bts.qualifier) {
	case BTS_BRANCH:
		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
		break;
	}
}

/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: tracing must be suspended on the current cpu
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *)arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (unlikely(!tr))
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	if (unlikely(!this_tracer))
		return;

	trace = ds_read_bts(this_tracer);
	if (!trace)
		return;

	/*
	 * The DS buffer is circular: the older records sit between the
	 * current write position (top) and the end of the buffer, the
	 * newer ones between the beginning and top.
	 */
	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(trace, at);
}

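/*
 * Tracer ->open callback: pause branch recording on all cpus while each
 * cpu drains its own BTS buffer into the ftrace ring buffer.
 */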
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
	/*
	 * We need to collect the trace on the respective cpu since ftrace
	 * implicitly adds the record for the current cpu.
	 * Once that is more flexible, we could collect the data from any cpu.
	 */
	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_resume_bts(per_cpu(hwb_tracer, cpu));
	put_online_cpus();
}

static void trace_bts_close(struct trace_iterator *iter)
{
	tracing_reset_online_cpus(iter->tr);
}

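/*
 * Called from the oops path: flush the current cpu's branch trace into
 * the ftrace buffer so that it is available in the crash output.
 */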
void trace_hw_branch_oops(void)
{
	if (this_tracer) {
		ds_suspend_bts_noirq(this_tracer);
		trace_bts_cpu(hw_branch_trace);
		ds_resume_bts_noirq(this_tracer);
	}
}

struct tracer bts_tracer __read_mostly =
{
	.name		= "hw-branch-tracer",
	.init		= bts_trace_init,
	.reset		= bts_trace_reset,
	.print_header	= bts_trace_print_header,
	.print_line	= bts_trace_print_line,
	.start		= bts_trace_start,
	.stop		= bts_trace_stop,
	.open		= trace_bts_prepare,
	.close		= trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};

__init static int init_bts_trace(void)
{
	register_hotcpu_notifier(&bts_hotcpu_notifier);
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);