ARM: hw_breakpoint: correct and simplify alignment fixup code
arch/arm/kernel/hw_breakpoint.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;
/* Determine number of BRP registers available. */
static int get_num_brps(void)
{
        u32 didr;
        ARM_DBG_READ(c0, 0, didr);
        return ((didr >> 24) & 0xf) + 1;
}
/* Determine number of WRP registers available. */
static int get_num_wrps(void)
{
        /*
         * FIXME: When a watchpoint fires, the only way to work out which
         * watchpoint it was is by disassembling the faulting instruction
         * and working out the address of the memory access.
         *
         * Furthermore, we can only do this if the watchpoint was precise
         * since imprecise watchpoints prevent us from calculating register
         * based addresses.
         *
         * For the time being, we only report 1 watchpoint register so we
         * always know which watchpoint fired. In the future we can either
         * add a disassembler and address generation emulator, or we can
         * insert a check to see if the DFAR is set on watchpoint exception
         * entry [the ARM ARM states that the DFAR is UNKNOWN, but
         * experience shows that it is set on some implementations].
         */
#if 0
        u32 didr, wrps;
        ARM_DBG_READ(c0, 0, didr);
        return ((didr >> 28) & 0xf) + 1;
#endif

        return 1;
}
int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warning("unknown slot type: %d\n", type);
                return 0;
        }
}
/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
        u32 didr;

        /* Do we implement the extended CPUID interface? */
        if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
                pr_warning("CPUID feature registers not supported. "
                           "Assuming v6 debug is present.\n");
                return ARM_DEBUG_ARCH_V6;
        }

        ARM_DBG_READ(c0, 0, didr);
        return (didr >> 16) & 0xf;
}
/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_bps(void)
{
        return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1;
}

u8 arch_get_debug_arch(void)
{
        return debug_arch;
}
#define READ_WB_REG_CASE(OP2, M, VAL)           \
        case ((OP2 << 4) + M):                  \
                ARM_DBG_READ(c ## M, OP2, VAL); \
                break

#define WRITE_WB_REG_CASE(OP2, M, VAL)          \
        case ((OP2 << 4) + M):                  \
                ARM_DBG_WRITE(c ## M, OP2, VAL);\
                break

#define GEN_READ_WB_REG_CASES(OP2, VAL)         \
        READ_WB_REG_CASE(OP2, 0, VAL);          \
        READ_WB_REG_CASE(OP2, 1, VAL);          \
        READ_WB_REG_CASE(OP2, 2, VAL);          \
        READ_WB_REG_CASE(OP2, 3, VAL);          \
        READ_WB_REG_CASE(OP2, 4, VAL);          \
        READ_WB_REG_CASE(OP2, 5, VAL);          \
        READ_WB_REG_CASE(OP2, 6, VAL);          \
        READ_WB_REG_CASE(OP2, 7, VAL);          \
        READ_WB_REG_CASE(OP2, 8, VAL);          \
        READ_WB_REG_CASE(OP2, 9, VAL);          \
        READ_WB_REG_CASE(OP2, 10, VAL);         \
        READ_WB_REG_CASE(OP2, 11, VAL);         \
        READ_WB_REG_CASE(OP2, 12, VAL);         \
        READ_WB_REG_CASE(OP2, 13, VAL);         \
        READ_WB_REG_CASE(OP2, 14, VAL);         \
        READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)        \
        WRITE_WB_REG_CASE(OP2, 0, VAL);         \
        WRITE_WB_REG_CASE(OP2, 1, VAL);         \
        WRITE_WB_REG_CASE(OP2, 2, VAL);         \
        WRITE_WB_REG_CASE(OP2, 3, VAL);         \
        WRITE_WB_REG_CASE(OP2, 4, VAL);         \
        WRITE_WB_REG_CASE(OP2, 5, VAL);         \
        WRITE_WB_REG_CASE(OP2, 6, VAL);         \
        WRITE_WB_REG_CASE(OP2, 7, VAL);         \
        WRITE_WB_REG_CASE(OP2, 8, VAL);         \
        WRITE_WB_REG_CASE(OP2, 9, VAL);         \
        WRITE_WB_REG_CASE(OP2, 10, VAL);        \
        WRITE_WB_REG_CASE(OP2, 11, VAL);        \
        WRITE_WB_REG_CASE(OP2, 12, VAL);        \
        WRITE_WB_REG_CASE(OP2, 13, VAL);        \
        WRITE_WB_REG_CASE(OP2, 14, VAL);        \
        WRITE_WB_REG_CASE(OP2, 15, VAL)
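
/*
 * Note (added for clarity): read_wb_reg()/write_wb_reg() index a debug
 * register as ((OP2 << 4) + M), where OP2 selects the register bank and M
 * the register number within it. For example,
 * READ_WB_REG_CASE(ARM_OP2_BVR, 5, val) expands roughly to
 * "case ((ARM_OP2_BVR << 4) + 5): ARM_DBG_READ(c5, ARM_OP2_BVR, val);",
 * i.e. a CP14 read of DBGBVR5. The ARM_BASE_* register offsets used
 * elsewhere in this file are assumed to follow the same encoding.
 */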
static u32 read_wb_reg(int n)
{
        u32 val = 0;

        switch (n) {
        GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warning("attempt to read from unknown breakpoint "
                           "register %d\n", n);
        }

        return val;
}
static void write_wb_reg(int n, u32 val)
{
        switch (n) {
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warning("attempt to write to unknown breakpoint "
                           "register %d\n", n);
        }
        isb();
}
/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
        u32 dscr;
        int ret = 0;

        ARM_DBG_READ(c1, 0, dscr);

        /* Ensure that halting mode is disabled. */
        if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled. "
                      "Unable to access hardware resources.")) {
                ret = -EPERM;
                goto out;
        }

        /* Write to the corresponding DSCR. */
        switch (debug_arch) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
                break;
        case ARM_DEBUG_ARCH_V7_ECP14:
                ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
                break;
        default:
                ret = -ENODEV;
                goto out;
        }

        /* Check that the write made it through. */
        ARM_DBG_READ(c1, 0, dscr);
        if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
                      "failed to enable monitor mode.")) {
                ret = -EPERM;
        }

out:
        return ret;
}
/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
        u32 ctrl_reg;
        struct arch_hw_breakpoint_ctrl ctrl;
        u8 size = 4;

        if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
                goto out;

        if (enable_monitor_mode())
                goto out;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.len = ARM_BREAKPOINT_LEN_8;
        ctrl_reg = encode_ctrl_reg(ctrl);
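
        /*
         * Note (added): probe for 8-byte support by programming a control
         * value that requests an 8-byte byte-address select into WCR0 and
         * reading it back; if all of the requested bits stick, the
         * implementation supports 8-byte watchpoints.
         */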
        write_wb_reg(ARM_BASE_WVR, 0);
        write_wb_reg(ARM_BASE_WCR, ctrl_reg);
        if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
                size = 8;

out:
        return size;
}
u8 arch_get_max_wp_len(void)
{
        return max_watchpoint_len;
}

/*
 * Handler for reactivating a suspended watchpoint when the single
 * step `mismatch' breakpoint is triggered.
 */
static void wp_single_step_handler(struct perf_event *bp, int unused,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        perf_event_enable(counter_arch_bp(bp)->suspended_wp);
        unregister_hw_breakpoint(bp);
}

static int bp_is_single_step(struct perf_event *bp)
{
        return bp->overflow_handler == wp_single_step_handler;
}
/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, ctrl_base, val_base, ret = 0;

        /* Ensure that we are in monitor mode and halting mode is disabled. */
        ret = enable_monitor_mode();
        if (ret)
                goto out;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_base = ARM_BASE_BCR;
                val_base = ARM_BASE_BVR;
                slots = __get_cpu_var(bp_on_reg);
                max_slots = core_num_brps - 1;
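
                /*
                 * Note (added): the highest-numbered BRP is reserved for the
                 * mismatch breakpoint used to single-step over watchpoints
                 * (see core_has_mismatch_bps()), so only core_num_brps - 1
                 * slots are offered to ordinary breakpoints here.
                 */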
                if (bp_is_single_step(bp)) {
                        info->ctrl.mismatch = 1;
                        i = max_slots;
                        slots[i] = bp;
                        goto setup;
                }
        } else {
                /* Watchpoint */
                ctrl_base = ARM_BASE_WCR;
                val_base = ARM_BASE_WVR;
                slots = __get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) {
                ret = -EBUSY;
                goto out;
        }

setup:
        /* Setup the address register. */
        write_wb_reg(val_base + i, info->address);

        /* Setup the control register. */
        write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1);

out:
        return ret;
}
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, base;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                base = ARM_BASE_BCR;
                slots = __get_cpu_var(bp_on_reg);
                max_slots = core_num_brps - 1;

                if (bp_is_single_step(bp)) {
                        i = max_slots;
                        slots[i] = NULL;
                        goto reset;
                }
        } else {
                /* Watchpoint */
                base = ARM_BASE_WCR;
                slots = __get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }

        /* Remove the breakpoint. */
        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
                return;

reset:
        /* Reset the control register. */
        write_wb_reg(base + i, 0);
}
static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
                        && max_watchpoint_len >= 8)
                        break;
        default:
                return -EINVAL;
        }

        /*
         * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
         * by the hardware and must be aligned to the appropriate number of
         * bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
            info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                return -EINVAL;

        /* Address */
        info->address = bp->attr.bp_addr;

        /* Privilege */
        info->ctrl.privilege = ARM_BREAKPOINT_USER;
        if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp))
                info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        /* Mismatch */
        info->ctrl.mismatch = 0;

        return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret = 0;
        u32 offset, alignment_mask = 0x3;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                goto out;

        /* Check address alignment. */
        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
        offset = info->address & alignment_mask;
        switch (offset) {
        case 0:
                /* Aligned */
                break;
        case 1:
                /* Allow single byte watchpoint. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                        break;
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
        default:
                ret = -EINVAL;
                goto out;
        }

        info->address &= ~alignment_mask;
        info->ctrl.len <<= offset;
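
        /*
         * Worked example (added for illustration): a 2-byte watchpoint at
         * address 0x1002 has offset 2, so the address is rounded down to
         * 0x1000 and the byte-address-select mask (assumed to be 0b0011 for
         * ARM_BREAKPOINT_LEN_2) is shifted up to 0b1100, selecting bytes
         * 2-3 of the aligned word.
         */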
        /*
         * Currently we rely on an overflow handler to take
         * care of single-stepping the breakpoint when it fires.
         * In the case of userspace breakpoints on a core with V7 debug,
         * we can use the mismatch feature as a poor-man's hardware single-step.
         */
        if (WARN_ONCE(!bp->overflow_handler &&
                (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()),
                        "overflow handler required but none found")) {
                ret = -EINVAL;
        }
out:
        return ret;
}
static void update_mismatch_flag(int idx, int flag)
{
        struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]);
        struct arch_hw_breakpoint *info;

        if (bp == NULL)
                return;

        info = counter_arch_bp(bp);

        /* Update the mismatch field to enter/exit `single-step' mode */
        if (!bp->overflow_handler && info->ctrl.mismatch != flag) {
                info->ctrl.mismatch = flag;
                write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1);
        }
}
static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
        int i;
        struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg);
        struct arch_hw_breakpoint *info;
        struct perf_event_attr attr;

        /* Without a disassembler, we can only handle 1 watchpoint. */
        BUG_ON(core_num_wrps > 1);

        hw_breakpoint_init(&attr);
        attr.bp_addr = regs->ARM_pc & ~0x3;
        attr.bp_len = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_X;

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                if (slots[i] == NULL) {
                        rcu_read_unlock();
                        continue;
                }

                /*
                 * The DFAR is an unknown value. Since we only allow a
                 * single watchpoint, we can set the trigger to the lowest
                 * possible faulting address.
                 */
                info = counter_arch_bp(slots[i]);
                info->trigger = slots[i]->attr.bp_addr;
                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
                perf_bp_event(slots[i], regs);

                /*
                 * If no overflow handler is present, insert a temporary
                 * mismatch breakpoint so we can single-step over the
                 * watchpoint trigger.
                 */
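                /*
                 * Note (added): the breakpoint registered below is programmed
                 * with the PC of the instruction that triggered the
                 * watchpoint; because it is installed as a mismatch
                 * breakpoint, it fires on the first instruction executed at
                 * any other address, at which point wp_single_step_handler()
                 * re-enables the suspended watchpoint and removes itself.
                 */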
                if (!slots[i]->overflow_handler) {
                        bp = register_user_hw_breakpoint(&attr,
                                                         wp_single_step_handler,
                                                         current);
                        counter_arch_bp(bp)->suspended_wp = slots[i];
                        perf_event_disable(slots[i]);
                }

                rcu_read_unlock();
        }
}
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
        int i;
        int mismatch;
        u32 ctrl_reg, val, addr;
        struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg);
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        /* The exception entry code places the amended lr in the PC. */
        addr = regs->ARM_pc;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL) {
                        rcu_read_unlock();
                        continue;
                }

                mismatch = 0;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(ARM_BASE_BVR + i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
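
                /*
                 * Example (added): a Thumb breakpoint registered at 0x8002
                 * is stored as BVR = 0x8000 with a byte-address-select of
                 * 0b1100, so an exception at PC 0x8002 yields
                 * (1 << 2) & 0b1100 != 0 and is treated as a match.
                 */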
                if ((1 << (addr & 0x3)) & ctrl.len) {
                        mismatch = 1;
                        info = counter_arch_bp(bp);
                        info->trigger = addr;
                }

unlock:
                if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) {
                        pr_debug("breakpoint fired: address = 0x%x\n", addr);
                        perf_bp_event(bp, regs);
                }

                update_mismatch_flag(i, mismatch);
                rcu_read_unlock();
        }
}
/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint].
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs)
{
        int ret = 1; /* Unhandled fault. */
        u32 dscr;

        /* We only handle watchpoints and hardware breakpoints. */
        ARM_DBG_READ(c1, 0, dscr);

        /* Perform perf callbacks. */
        switch (ARM_DSCR_MOE(dscr)) {
        case ARM_ENTRY_BREAKPOINT:
                breakpoint_handler(addr, regs);
                break;
        case ARM_ENTRY_ASYNC_WATCHPOINT:
                WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
        case ARM_ENTRY_SYNC_WATCHPOINT:
                watchpoint_handler(addr, regs);
                break;
        default:
                goto out;
        }

        ret = 0;
out:
        return ret;
}
/*
 * One-time initialisation.
 */
static void reset_ctrl_regs(void *unused)
{
        int i;

        /*
         * v7 debug contains save and restore registers so that debug state
         * can be maintained across low-power modes without leaving
         * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
         * we can write to the debug registers out of reset, so we must
         * unlock the OS Lock Access Register to avoid taking undefined
         * instruction exceptions later on.
         */
        if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
                /*
                 * Unconditionally clear the lock by writing a value
                 * other than 0xC5ACCE55 to the access register.
                 */
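                /*
                 * Note (added): the CP14 encoding c1, c0, 4 used below is
                 * DBGOSLAR, the OS Lock Access Register; writing anything
                 * other than the key value releases the lock.
                 */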
                asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
                isb();
        }

        if (enable_monitor_mode())
                return;

        for (i = 0; i < core_num_brps; ++i) {
                write_wb_reg(ARM_BASE_BCR + i, 0UL);
                write_wb_reg(ARM_BASE_BVR + i, 0UL);
        }

        for (i = 0; i < core_num_wrps; ++i) {
                write_wb_reg(ARM_BASE_WCR + i, 0UL);
                write_wb_reg(ARM_BASE_WVR + i, 0UL);
        }
}
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
                                      unsigned long action, void *cpu)
{
        if (action == CPU_ONLINE)
                smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
        .notifier_call = dbg_reset_notify,
};
static int __init arch_hw_breakpoint_init(void)
{
        int ret = 0;
        u32 dscr;

        debug_arch = get_debug_arch();

        if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
                ret = -ENODEV;
                goto out;
        }

        /* Determine how many BRPs/WRPs are available. */
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        if (core_has_mismatch_bps())
                pr_info("1 breakpoint reserved for watchpoint single-step.\n");

        ARM_DBG_READ(c1, 0, dscr);
        if (dscr & ARM_DSCR_HDBGEN) {
                pr_warning("halting debug mode enabled. Assuming maximum "
                           "watchpoint size of 4 bytes.");
        } else {
                /*
                 * Reset the breakpoint resources. We assume that a halting
                 * debugger will leave the world in a nice state for us.
                 */
                smp_call_function(reset_ctrl_regs, NULL, 1);
                reset_ctrl_regs(NULL);

                /* Work out the maximum supported watchpoint length. */
                max_watchpoint_len = get_max_wp_len();
                pr_info("maximum watchpoint size is %u bytes.\n",
                        max_watchpoint_len);
        }

        /* Register debug fault handler. */
        hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
                        "watchpoint debug exception");
        hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
                         "breakpoint debug exception");

        /* Register hotplug notifier. */
        register_cpu_notifier(&dbg_reset_nb);
out:
        return ret;
}
arch_initcall(arch_hw_breakpoint_init);
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}