/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
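
/*
 * Note on the placeholder: until register_sh_ubc() swaps in a real
 * descriptor, sh_ubc->num_events is 0, so every
 * "for (i = 0; i < sh_ubc->num_events; i++)" loop below runs zero
 * iterations and an early install attempt degrades to a WARN_ONCE()
 * instead of touching unregistered hardware.
 */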
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		/* Claim the first free channel slot on this CPU. */
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}
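
/*
 * Illustrative sketch (not part of this file): how a kernel-side user might
 * reach the code above via the generic hw_breakpoint layer, in the style of
 * samples/hw_breakpoint/data_breakpoint.c. The symbol name and handler are
 * hypothetical; register_wide_hw_breakpoint() creates one event per CPU,
 * each of which lands in arch_install_hw_breakpoint() on its own CPU.
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("some_symbol");
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, my_overflow_handler);
 */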
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		/* Find the channel this breakpoint occupies and free it. */
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}
/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
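
/*
 * Worked example (illustrative addresses): with TASK_SIZE = 0x7c000000, an
 * 8-byte breakpoint at va = 0x7bfffffc has va < TASK_SIZE, so the check
 * returns 0 and the event is treated as a user-space breakpoint even though
 * its last byte (va + len - 1 = 0x7c000003) crosses into kernel space.
 */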
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
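
/*
 * Worked example of the two mappings above: a request with
 * attr.bp_len = HW_BREAKPOINT_LEN_4 and attr.bp_type = HW_BREAKPOINT_W is
 * encoded by arch_build_bp_info() as SH_BREAKPOINT_LEN_4 +
 * SH_BREAKPOINT_WRITE, and arch_bp_generic_fields() recovers exactly
 * HW_BREAKPOINT_LEN_4 / HW_BREAKPOINT_W from that encoding.
 */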
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}
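
/*
 * Alignment example: SH_BREAKPOINT_LEN_4 selects align = 3, so a request
 * for address 0x8c001002 fails (0x8c001002 & 3 == 2), while 0x8c001004 is
 * accepted; the low-order bits must be clear for the span implied by len.
 */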
/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		/* unregister_hw_breakpoint() tolerates NULL slots. */
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
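
/*
 * Mask arithmetic example: with channels 0 and 1 active (active_mask() ==
 * 0x3) and only channel 0 triggered by a ptrace breakpoint (cmf == 0x1),
 * the handler clears bit 0 from resume_mask, so enable_all(0x2) re-arms
 * just channel 1 on the way out.
 */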
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
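
/*
 * Trap-to-handler flow: the UBC raises its debug exception, the trap
 * handler above calls notify_die(DIE_BREAKPOINT, ...), and the die
 * notifier chain delivers that to hw_breakpoint_exceptions_notify()
 * below, which filters for UBC-originated traps before running the
 * real handler.
 */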
/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}
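
/*
 * This entry point is not called directly: the generic hw_breakpoint core
 * registers it on the die notifier chain, so it sees every die
 * notification and bails out early for anything that isn't a UBC
 * DIE_BREAKPOINT.
 */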
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
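
/*
 * Illustrative sketch (an assumption, modeled loosely on the SH-4A UBC
 * support code): a CPU family fills out an sh_ubc descriptor and registers
 * it at boot. The callback names, trap number, and clock name here are
 * hypothetical placeholders.
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "MY-UBC",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	static int __init my_ubc_init(void)
 *	{
 *		my_ubc.clk = clk_get(NULL, "ubc0");
 *		return register_sh_ubc(&my_ubc);
 *	}
 *	arch_initcall(my_ubc_init);
 */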