/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/ds.h>

#include "trace.h"
#include "trace_output.h"
#define BTS_BUFFER_SIZE (1 << 13)

static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);

#define this_tracer per_cpu(tracer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;
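
/*
 * Request a BTS buffer for the given cpu from the DS layer. On any
 * error, the per-cpu tracer pointer is cleared so that callers only
 * need to check it for NULL.
 */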
static void bts_trace_init_cpu(int cpu)
{
        per_cpu(tracer, cpu) =
                ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
                                   NULL, (size_t)-1, BTS_KERNEL);

        if (IS_ERR(per_cpu(tracer, cpu)))
                per_cpu(tracer, cpu) = NULL;
}
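
/*
 * Tracer init: try to enable branch tracing on every online cpu.
 * Tracing counts as enabled if at least one cpu could be set up.
 */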
static int bts_trace_init(struct trace_array *tr)
{
        int cpu;

        hw_branch_trace = tr;
        trace_hw_branches_enabled = 0;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                bts_trace_init_cpu(cpu);

                if (likely(per_cpu(tracer, cpu)))
                        trace_hw_branches_enabled = 1;
        }
        trace_hw_branches_suspended = 0;
        put_online_cpus();

        /* If we could not enable tracing on a single cpu, we fail. */
        return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
}
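
/* Release all per-cpu BTS buffers and mark tracing disabled. */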
static void bts_trace_reset(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (likely(per_cpu(tracer, cpu))) {
                        ds_release_bts(per_cpu(tracer, cpu));
                        per_cpu(tracer, cpu) = NULL;
                }
        }
        trace_hw_branches_enabled = 0;
        trace_hw_branches_suspended = 0;
        put_online_cpus();
}
static void bts_trace_start(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(tracer, cpu)))
                        ds_resume_bts(per_cpu(tracer, cpu));
        trace_hw_branches_suspended = 0;
        put_online_cpus();
}
static void bts_trace_stop(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(tracer, cpu)))
                        ds_suspend_bts(per_cpu(tracer, cpu));
        trace_hw_branches_suspended = 1;
        put_online_cpus();
}
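
/*
 * CPU hotplug: set up BTS on a cpu coming online (re-suspending it if
 * the tracer as a whole is suspended) and release BTS on a cpu going
 * offline.
 */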
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                /* The notification is sent with interrupts enabled. */
                if (trace_hw_branches_enabled) {
                        bts_trace_init_cpu(cpu);

                        if (trace_hw_branches_suspended &&
                            likely(per_cpu(tracer, cpu)))
                                ds_suspend_bts(per_cpu(tracer, cpu));
                }
                break;

        case CPU_DOWN_PREPARE:
                /* The notification is sent with interrupts enabled. */
                if (likely(per_cpu(tracer, cpu))) {
                        ds_release_bts(per_cpu(tracer, cpu));
                        per_cpu(tracer, cpu) = NULL;
                }
        }

        return NOTIFY_DONE;
}
static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
        .notifier_call = bts_hotcpu_handler
};
static void bts_trace_print_header(struct seq_file *m)
{
        seq_puts(m, "# CPU#  TO  <-  FROM\n");
}
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *seq = &iter->seq;
        struct hw_branch_entry *it;

        trace_assign_type(it, entry);

        if (entry->type == TRACE_HW_BRANCHES) {
                if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
                    seq_print_ip_sym(seq, it->to, symflags) &&
                    trace_seq_printf(seq, "\t  <-  ") &&
                    seq_print_ip_sym(seq, it->from, symflags) &&
                    trace_seq_printf(seq, "\n"))
                        return TRACE_TYPE_HANDLED;
                return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_UNHANDLED;
}
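
/*
 * Record a single branch (from -> to) in the ftrace ring buffer.
 * Runs with interrupts disabled; the per-cpu disabled counter keeps
 * the tracer from recursing into itself.
 */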
void trace_hw_branch(u64 from, u64 to)
{
        struct ftrace_event_call *call = &event_hw_branch;
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct hw_branch_entry *entry;
        unsigned long irq1;
        int cpu;

        if (unlikely(!tr))
                return;

        if (unlikely(!trace_hw_branches_enabled))
                return;

        local_irq_save(irq1);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, from);
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to   = to;
        if (!filter_check_discard(call, entry, tr->buffer, event))
                trace_buffer_unlock_commit(tr, event, 0, 0);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(irq1);
}
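
/*
 * Decode one BTS record and, if it describes a branch, feed it into
 * the ftrace buffer.
 */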
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
        struct bts_struct bts;
        int err = 0;

        WARN_ON_ONCE(!trace->read);
        if (!trace->read)
                return;

        err = trace->read(this_tracer, at, &bts);
        if (err < 0)
                return;

        switch (bts.qualifier) {
        case BTS_BRANCH:
                trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
                break;
        }
}
/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: tracing must be suspended on the current cpu
 */
static void trace_bts_cpu(void *arg)
{
        struct trace_array *tr = (struct trace_array *)arg;
        const struct bts_trace *trace;
        unsigned char *at;

        if (unlikely(!tr))
                return;

        if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
                return;

        if (unlikely(!this_tracer))
                return;

        trace = ds_read_bts(this_tracer);
        if (!trace)
                return;

        /*
         * Replay the circular DS buffer in chronological order: from
         * the current top to the end, then from the beginning up to
         * the top.
         */
        for (at = trace->ds.top; (void *)at < trace->ds.end;
             at += trace->ds.size)
                trace_bts_at(trace, at);

        for (at = trace->ds.begin; (void *)at < trace->ds.top;
             at += trace->ds.size)
                trace_bts_at(trace, at);
}
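
/*
 * Open hook: suspend tracing on all cpus, drain the per-cpu BTS
 * buffers into the ftrace buffer, then resume.
 */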
static void trace_bts_prepare(struct trace_iterator *iter)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(tracer, cpu)))
                        ds_suspend_bts(per_cpu(tracer, cpu));
        /*
         * We need to collect the trace on the respective cpu since ftrace
         * implicitly adds the record for the current cpu.
         * Once that is more flexible, we could collect the data from any cpu.
         */
        on_each_cpu(trace_bts_cpu, iter->tr, 1);

        for_each_online_cpu(cpu)
                if (likely(per_cpu(tracer, cpu)))
                        ds_resume_bts(per_cpu(tracer, cpu));
        put_online_cpus();
}
static void trace_bts_close(struct trace_iterator *iter)
{
        tracing_reset_online_cpus(iter->tr);
}
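
/*
 * Dump the BTS trace of the current cpu into the ftrace buffer from
 * oops context; the noirq variants are used since interrupts may
 * already be disabled.
 */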
void trace_hw_branch_oops(void)
{
        if (this_tracer) {
                ds_suspend_bts_noirq(this_tracer);
                trace_bts_cpu(hw_branch_trace);
                ds_resume_bts_noirq(this_tracer);
        }
}
struct tracer bts_tracer __read_mostly =
{
        .name           = "hw-branch-tracer",
        .init           = bts_trace_init,
        .reset          = bts_trace_reset,
        .print_header   = bts_trace_print_header,
        .print_line     = bts_trace_print_line,
        .start          = bts_trace_start,
        .stop           = bts_trace_stop,
        .open           = trace_bts_prepare,
        .close          = trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};
__init static int init_bts_trace(void)
{
        register_hotcpu_notifier(&bts_hotcpu_notifier);
        return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);