/*
 * Copyright (C) 2009 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "arm_dpm.h"
#include <jtag/jtag.h>
#include "register.h"
#include "breakpoints.h"
#include "target_type.h"
#include "arm_opcodes.h"

/**
 * Implements various ARM DPM operations using architectural debug registers.
 * These routines layer over core-specific communication methods to cope with
 * implementation differences between cores like ARM1136 and Cortex-A8.
 *
 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B).  In OpenOCD, DPM operations
 * are abstracted through internal programming interfaces to share code and
 * to minimize needless differences in debug behavior between cores.
 */
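
/* A core driver provides the low-level methods in struct arm_dpm
 * (prepare/finish, instr_write_data_*, instr_read_data_*, the optional
 * instr_cpsr_sync, and bpwp_enable/bpwp_disable), then calls
 * arm_dpm_setup() once and arm_dpm_initialize() at the start of each
 * debug session; everything below is written against those hooks.
 */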

/*----------------------------------------------------------------------*/
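
/*
 * Coprocessor support
 *
 * Both helpers below route a single MRC or MCR instruction through the
 * DPM instruction-execution hooks, using R0 and the DCC channel to move
 * data between the debugger and the core.
 */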

/* Read coprocessor */
static int dpm_mrc(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
		uint32_t *value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
			(int) op1, (int) CRn,
			(int) CRm, (int) op2);

	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
			value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int dpm_mcr(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
		uint32_t value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
			(int) op1, (int) CRn,
			(int) CRm, (int) op2);

	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
			value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
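
/* These helpers are not called by name elsewhere; arm_dpm_setup() installs
 * them as the struct arm coprocessor accessors (the "coprocessor access
 * setup" step below).  For example, reading the MIDR goes through that
 * interface as cpnum 15, op1 0, CRn 0, CRm 0, op2 0, i.e.
 * "MRC p15, 0, r0, c0, c0, 0".
 */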

/*----------------------------------------------------------------------*/

/*
 * Register access utilities
 */

/* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
 * Routines *must* restore the original mode before returning!!
 */
static int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
{
	int retval;
	uint32_t cpsr;

	/* restore previous mode */
	if (mode == ARM_MODE_ANY)
		cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);

	/* else force to the specified mode */
	else
		cpsr = mode;

	retval = dpm->instr_write_data_r0(dpm, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr);
	if (retval != ERROR_OK)
		return retval;

	if (dpm->instr_cpsr_sync)
		retval = dpm->instr_cpsr_sync(dpm);

	return retval;
}

/* just read the register -- rely on the core mode being right */
static int dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	uint32_t value;
	int retval;

	switch (regnum) {
	case 0 ... 14:
		/* return via DCC:  "MCR p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_read_data_dcc(dpm,
				ARMV4_5_MCR(14, 0, regnum, 0, 5, 0),
				&value);
		break;
	case 15:	/* PC */
		/* "MOV r0, pc"; then return via DCC */
		retval = dpm->instr_read_data_r0(dpm, 0xe1a0000f, &value);

		/* NOTE: this seems like a slightly awkward place to update
		 * this value ... but if the PC gets written (the only way
		 * to change what we compute), the arch spec says subsequent
		 * reads return values which are "unpredictable".  So this
		 * is always right except in those broken-by-intent cases.
		 */
		switch (dpm->arm->core_state) {
		case ARM_STATE_ARM:
			value -= 8;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			value -= 4;
			break;
		case ARM_STATE_JAZELLE:
			/* core-specific ... ? */
			LOG_WARNING("Jazelle PC adjustment unknown");
			break;
		}
		break;
	default:
		/* 16: "MRS r0, CPSR"; then return via DCC
		 * 17: "MRS r0, SPSR"; then return via DCC
		 */
		retval = dpm->instr_read_data_r0(dpm,
				ARMV4_5_MRS(0, regnum & 1),
				&value);
		break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}
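
/* NOTE: the PC adjustment in dpm_read_reg() compensates for the ARM
 * pipeline:  "MOV r0, pc" yields the instruction's address plus 8 in ARM
 * state and plus 4 in Thumb/ThumbEE state, so that bias is subtracted
 * before the value lands in the register cache.
 */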

/* just write the register -- rely on the core mode being right */
static int dpm_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval;
	uint32_t value = buf_get_u32(r->value, 0, 32);

	switch (regnum) {
	case 0 ... 14:
		/* load register from DCC:  "MRC p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, regnum, 0, 5, 0),
				value);
		break;
	case 15:	/* PC */
		/* read r0 from DCC; then "MOV pc, r0" */
		retval = dpm->instr_write_data_r0(dpm, 0xe1a0f000, value);
		break;
	default:
		/* 16: read r0 from DCC, then "MSR CPSR_cxsf, r0"
		 * 17: read r0 from DCC, then "MSR SPSR_cxsf, r0"
		 */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MSR_GP(0, 0xf, regnum & 1),
				value);
		if (retval != ERROR_OK)
			return retval;

		if (regnum == 16 && dpm->instr_cpsr_sync)
			retval = dpm->instr_cpsr_sync(dpm);

		break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}

/**
 * Read basic registers of the current context:  R0 to R15, and CPSR;
 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
 * In normal operation this is called on entry to halting debug state,
 * possibly after some other operations supporting restore of debug state
 * or making sure the CPU is fully idle (drain write buffer, etc).
 */
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	uint32_t cpsr;
	int retval;
	struct reg *r;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* read R0 first (it's used for scratch), then CPSR */
	r = arm->core_cache->reg_list + 0;

	retval = dpm_read_reg(dpm, r, 0);
	if (retval != ERROR_OK)
		goto fail;
	r->dirty = true;

	retval = dpm->instr_read_data_r0(dpm, ARMV4_5_MRS(0, 0), &cpsr);
	if (retval != ERROR_OK)
		goto fail;

	/* update core mode and state, plus shadow mapping for R8..R14 */
	arm_set_cpsr(arm, cpsr);

	/* REVISIT we can probably avoid reading R1..R14, saving time... */
	for (unsigned i = 1; i < 16; i++) {
		r = arm_reg_current(arm, i);

		retval = dpm_read_reg(dpm, r, i);
		if (retval != ERROR_OK)
			goto fail;
	}

	/* NOTE: SPSR ignored (if it's even relevant). */

	/* REVISIT the debugger can trigger various exceptions.  See the
	 * ARMv7A architecture spec, section C5.7, for more info about
	 * what defenses are needed; v6 debug has the most issues.
	 */

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}

/* Avoid needless I/O ... leave breakpoints and watchpoints alone
 * unless they're removed, or need updating because of single-stepping
 * or running debugger code.
 */
static int dpm_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
		struct dpm_bpwp *xp, int *set_p)
{
	int retval = ERROR_OK;
	bool disable;

	if (!set_p) {
		if (!xp->dirty)
			goto done;
		xp->dirty = false;
		/* removed or startup; we must disable it */
		disable = true;
	} else if (bpwp) {
		if (!xp->dirty)
			goto done;
		/* disabled, but we must set it */
		xp->dirty = disable = false;
	} else {
		if (!*set_p)
			goto done;
		/* set, but we must temporarily disable it */
		xp->dirty = disable = true;
	}

	if (disable)
		retval = dpm->bpwp_disable(dpm, xp->number);
	else
		retval = dpm->bpwp_enable(dpm, xp->number,
				xp->address, xp->control);

	if (retval != ERROR_OK)
		LOG_ERROR("%s: can't %s HW %spoint %d",
				target_name(dpm->arm->target),
				disable ? "disable" : "enable",
				(xp->number < 16) ? "break" : "watch",
				xp->number & 0xf);
done:
	return retval;
}
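
/* By convention the shared dpm_bpwp "number" field identifies breakpoint
 * units as 0..15 and watchpoint units as 16 and up; that's why the error
 * message above distinguishes "break" from "watch" by comparing against
 * 16, and why arm_dpm_initialize() numbers watchpoints starting at 16.
 */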

static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp);

/**
 * Writes all modified core registers for all processor modes.  In normal
 * operation this is called on exit from halting debug state.
 *
 * @param dpm represents the processor
 * @param bpwp true ensures breakpoints and watchpoints are set,
 *	false ensures they are cleared
 */
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
{
	struct arm *arm = dpm->arm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_write;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* If we're managing hardware breakpoints for this core, enable
	 * or disable them as requested.
	 *
	 * REVISIT We don't yet manage them for ANY cores.  Eventually
	 * we should be able to assume we handle them; but until then,
	 * cope with the hand-crafted breakpoint code.
	 */
	if (arm->target->type->add_breakpoint == dpm_add_breakpoint) {
		for (unsigned i = 0; i < dpm->nbp; i++) {
			struct dpm_bp *dbp = dpm->dbp + i;
			struct breakpoint *bp = dbp->bp;

			retval = dpm_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
					bp ? &bp->set : NULL);
			if (retval != ERROR_OK)
				goto done;
		}
	}

	/* enable/disable watchpoints */
	for (unsigned i = 0; i < dpm->nwp; i++) {
		struct dpm_wp *dwp = dpm->dwp + i;
		struct watchpoint *wp = dwp->wp;

		retval = dpm_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
				wp ? &wp->set : NULL);
		if (retval != ERROR_OK)
			goto done;
	}

	/* NOTE: writes to breakpoint and watchpoint registers might
	 * be queued, and need (efficient/batched) flushing later.
	 */

	/* Scan the registers until we find one that's both dirty and
	 * eligible for flushing.  Flush that and everything else that
	 * shares the same core mode setting.  Typically this won't
	 * actually find anything to do...
	 */
	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_write = false;

		/* check everything except our scratch register R0 */
		for (unsigned i = 1; i < cache->num_regs; i++) {
			struct arm_reg *r;
			unsigned regnum;

			/* also skip PC, CPSR, and non-dirty */
			if (i == 15)
				continue;
			if (arm->cpsr == cache->reg_list + i)
				continue;
			if (!cache->reg_list[i].dirty)
				continue;

			r = cache->reg_list[i].arch_info;
			regnum = r->num;

			/* may need to pick and set a mode */
			if (!did_write) {
				enum arm_mode tmode;

				did_write = true;
				mode = tmode = r->mode;

				/* cope with special cases */
				switch (regnum) {
				case 8 ... 12:
					/* r8..r12 "anything but FIQ" case;
					 * we "know" core mode is accurate
					 * since we haven't changed it yet
					 */
					if (arm->core_mode == ARM_MODE_FIQ
							&& ARM_MODE_ANY != mode)
						tmode = ARM_MODE_USR;
					break;
				case 16:
					/* SPSR */
					regnum += 1;
					break;
				}

				/* REVISIT error checks */
				if (tmode != ARM_MODE_ANY) {
					retval = dpm_modeswitch(dpm, tmode);
					if (retval != ERROR_OK)
						goto done;
				}
			}
			if (r->mode != mode)
				continue;

			retval = dpm_write_reg(dpm,
					&cache->reg_list[i],
					regnum);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_write);

	/* Restore original CPSR ... assuming either that we changed it,
	 * or it's dirty.  Must write PC to ensure the return address is
	 * defined, and must not write it before CPSR.
	 */
	retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
	if (retval != ERROR_OK)
		goto done;
	arm->cpsr->dirty = false;

	retval = dpm_write_reg(dpm, arm->pc, 15);
	if (retval != ERROR_OK)
		goto done;
	arm->pc->dirty = false;

	/* flush R0 -- it's *very* dirty by now */
	retval = dpm_write_reg(dpm, &cache->reg_list[0], 0);
	if (retval != ERROR_OK)
		goto done;
	cache->reg_list[0].dirty = false;

	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}
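
/* NOTE: the do/while rescan above is what lets one call flush banked
 * registers from several modes: each pass adopts the mode of the first
 * dirty register it finds, switches to it, writes everything dirty in
 * that mode, and then scans again until a pass writes nothing.
 */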

/* Returns ARM_MODE_ANY or temporary mode to use while reading the
 * specified register ... works around flakiness from ARM core calls.
 * Caller already filtered out SPSR access; mode is never MODE_SYS.
 */
static enum arm_mode dpm_mapmode(struct arm *arm,
		unsigned num, enum arm_mode mode)
{
	enum arm_mode amode = arm->core_mode;

	/* don't switch if the mode is already correct */
	if (amode == ARM_MODE_SYS)
		amode = ARM_MODE_USR;
	if (mode == amode)
		return ARM_MODE_ANY;

	switch (num) {
	/* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
	case 0 ... 7:
	case 15:
	case 16:
		break;
	/* r8..r12 aren't shadowed for anything except FIQ */
	case 8 ... 12:
		if (mode == ARM_MODE_FIQ)
			return mode;
		break;
	/* r13/sp, and r14/lr are always shadowed */
	case 13:
	case 14:
		return mode;
	default:
		LOG_WARNING("invalid register #%u", num);
		break;
	}
	return ARM_MODE_ANY;
}

/*
 * Standard ARM register accessors ... there are three methods
 * in "struct arm", to support individual read/write and bulk read
 * of all registers.
 */

static int arm_dpm_read_core_reg(struct target *target, struct reg *r,
		int regnum, enum arm_mode mode)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || regnum > 16)
		return ERROR_INVALID_ARGUMENTS;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to read SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_read_reg(dpm, r, regnum);
	if (retval != ERROR_OK)
		goto fail;

fail:
	/* always clean up, regardless of error */
	if (mode != ARM_MODE_ANY)
		/* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int arm_dpm_write_core_reg(struct target *target, struct reg *r,
		int regnum, enum arm_mode mode, uint32_t value)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || regnum > 16)
		return ERROR_INVALID_ARGUMENTS;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to write SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_write_reg(dpm, r, regnum);
	/* always clean up, regardless of error */
fail:
	if (mode != ARM_MODE_ANY)
		/* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int arm_dpm_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_read;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_read = false;

		/* We "know" arm_dpm_read_current_registers() was called so
		 * the unmapped registers (R0..R7, PC, AND CPSR) and some
		 * view of R8..R14 are current.  We also "know" oddities of
		 * register mapping: special cases for R8..R12 and SPSR.
		 *
		 * Pick some mode with unread registers and read them all.
		 */
		for (unsigned i = 0; i < cache->num_regs; i++) {
			struct arm_reg *r;

			if (cache->reg_list[i].valid)
				continue;
			r = cache->reg_list[i].arch_info;

			/* may need to pick a mode and set CPSR */
			if (!did_read) {
				did_read = true;
				mode = r->mode;

				/* For R8..R12 when we've entered debug
				 * state in FIQ mode... patch mode.
				 */
				if (mode == ARM_MODE_ANY)
					mode = ARM_MODE_USR;

				/* REVISIT error checks */
				retval = dpm_modeswitch(dpm, mode);
				if (retval != ERROR_OK)
					goto done;
			}
			if (r->mode != mode)
				continue;

			/* CPSR was read, so "R16" must mean SPSR */
			retval = dpm_read_reg(dpm,
					&cache->reg_list[i],
					(r->num == 16) ? 17 : r->num);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_read);

	retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}

/*----------------------------------------------------------------------*/

/*
 * Breakpoint and Watchpoint support.
 *
 * Hardware {break,watch}points are usually left active, to minimize
 * debug entry/exit costs.  When they are set or cleared, it's done in
 * batches.  Also, DPM-conformant hardware can update debug registers
 * regardless of whether the CPU is running or halted ... though that
 * fact isn't currently leveraged.
 */
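
/* The flow: dpm_add_*() and dpm_remove_*() only update the cached dpm_bpwp
 * state and mark it dirty; arm_dpm_write_dirty_registers() later pushes the
 * changes to hardware through the bpwp_enable() and bpwp_disable() methods.
 */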

static int dpm_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
		uint32_t addr, uint32_t length)
{
	uint32_t control;

	control = (1 << 0)	/* enable */
		| (3 << 1);	/* both user and privileged access */

	/* Match 1, 2, or all 4 byte addresses in this word.
	 *
	 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
	 * Support larger length, when addr is suitably aligned.  In
	 * particular, allow watchpoints on 8 byte "double" values.
	 *
	 * REVISIT allow watchpoints on unaligned 2-byte values; and on
	 * v7 hardware, unaligned 4-byte ones too.
	 */
	switch (length) {
	case 1:
		control |= (1 << (addr & 3)) << 5;
		break;
	case 2:
		/* require 2-byte alignment */
		if (!(addr & 1)) {
			control |= (3 << (addr & 2)) << 5;
			break;
		}
		/* FALL THROUGH */
	case 4:
		/* require 4-byte alignment */
		if (!(addr & 3)) {
			control |= 0xf << 5;
			break;
		}
		/* FALL THROUGH */
	default:
		LOG_ERROR("unsupported {break,watch}point length/alignment");
		return ERROR_INVALID_ARGUMENTS;
	}

	/* other shared control bits:
	 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
	 * bit 20 == 0 ... not linked to a context ID
	 * bits 28:24 == 0 ... not ignoring N LSBs (v7 only)
	 */

	xp->address = addr & ~3;
	xp->control = control;
	xp->dirty = true;

	LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
			xp->address, control, xp->number);

	/* hardware is updated in write_dirty_registers() */
	return ERROR_OK;
}
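
/* In the control word built above: bit 0 is the enable, bits 2:1 select
 * both privileged and user matching, and bits 8:5 are the byte address
 * select lanes within the word-aligned address stored in xp->address.
 */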

static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (bp->length < 2)
		return ERROR_INVALID_ARGUMENTS;
	if (!dpm->bpwp_enable)
		return retval;

	/* FIXME we need a generic solution for software breakpoints. */
	if (bp->type == BKPT_SOFT)
		LOG_DEBUG("using HW bkpt, not SW...");

	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (!dpm->dbp[i].bp) {
			retval = dpm_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
					bp->address, bp->length);
			if (retval == ERROR_OK)
				dpm->dbp[i].bp = bp;
			break;
		}
	}

	return retval;
}

static int dpm_remove_breakpoint(struct target *target, struct breakpoint *bp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_INVALID_ARGUMENTS;

	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (dpm->dbp[i].bp == bp) {
			dpm->dbp[i].bp = NULL;
			dpm->dbp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}

static int dpm_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
		struct watchpoint *wp)
{
	int retval;
	struct dpm_wp *dwp = dpm->dwp + index_t;
	uint32_t control;

	/* this hardware doesn't support data value matching or masking */
	if (wp->value || wp->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint values and masking not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	retval = dpm_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
	if (retval != ERROR_OK)
		return retval;

	control = dwp->bpwp.control;
	switch (wp->rw) {
	case WPT_READ:
		control |= 1 << 3;	/* match loads only */
		break;
	case WPT_WRITE:
		control |= 2 << 3;	/* match stores only */
		break;
	case WPT_ACCESS:
		control |= 3 << 3;	/* match loads and stores */
		break;
	}
	dwp->bpwp.control = control;

	dpm->dwp[index_t].wp = wp;

	return retval;
}

static int dpm_add_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (dpm->bpwp_enable) {
		for (unsigned i = 0; i < dpm->nwp; i++) {
			if (!dpm->dwp[i].wp) {
				retval = dpm_watchpoint_setup(dpm, i, wp);
				break;
			}
		}
	}

	return retval;
}

static int dpm_remove_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_INVALID_ARGUMENTS;

	for (unsigned i = 0; i < dpm->nwp; i++) {
		if (dpm->dwp[i].wp == wp) {
			dpm->dwp[i].wp = NULL;
			dpm->dwp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}

void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
{
	switch (dpm->arm->core_state) {
	case ARM_STATE_ARM:
		addr -= 8;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		addr -= 4;
		break;
	case ARM_STATE_JAZELLE:
		/* ?? */
		break;
	}
	dpm->wp_pc = addr;
}
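
/* Like PC reads, WFAR reports the address of the access-causing instruction
 * plus a pipeline offset that depends on the instruction set, which is why
 * the same 8/4 byte adjustment is applied before the address is recorded.
 */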

/*----------------------------------------------------------------------*/

/*
 * Other debug and support utilities
 */

void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
{
	struct target *target = dpm->arm->target;

	/* Examine debug reason */
	switch (DSCR_ENTRY(dscr)) {
	case 6:		/* Data abort (v6 only) */
	case 7:		/* Prefetch abort (v6 only) */
	/* FALL THROUGH -- assume a v6 core in abort mode */
	case 0:		/* HALT request from debugger */
	case 4:		/* EDBGRQ */
		target->debug_reason = DBG_REASON_DBGRQ;
		break;
	case 1:		/* HW breakpoint */
	case 3:		/* SW BKPT */
	case 5:		/* vector catch */
		target->debug_reason = DBG_REASON_BREAKPOINT;
		break;
	case 2:		/* asynch watchpoint */
	case 10:	/* precise watchpoint */
		target->debug_reason = DBG_REASON_WATCHPOINT;
		break;
	default:
		target->debug_reason = DBG_REASON_UNDEFINED;
		break;
	}
}
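
/* DSCR_ENTRY() extracts the "method of debug entry" field from the DSCR
 * value supplied by the core-specific driver; the cases above map the
 * ARMv6/ARMv7 entry encodings onto OpenOCD's generic debug reasons.
 */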

/*----------------------------------------------------------------------*/

/*
 * Setup and management support.
 */

/**
 * Hooks up this DPM to its associated target; call only once.
 * Initially this only covers the register cache; it also installs
 * coprocessor accessors and, where the target provides none of its own,
 * default breakpoint and watchpoint handlers.
 */
int arm_dpm_setup(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	struct target *target = arm->target;
	struct reg_cache *cache;

	arm->dpm = dpm;

	/* register access setup */
	arm->full_context = arm_dpm_full_context;
	arm->read_core_reg = arm_dpm_read_core_reg;
	arm->write_core_reg = arm_dpm_write_core_reg;

	cache = arm_build_reg_cache(target, arm);
	if (!cache)
		return ERROR_FAIL;

	*register_get_last_cache_p(&target->reg_cache) = cache;

	/* coprocessor access setup */
	arm->mrc = dpm_mrc;
	arm->mcr = dpm_mcr;

	/* breakpoint setup -- optional until it works everywhere */
	if (!target->type->add_breakpoint) {
		target->type->add_breakpoint = dpm_add_breakpoint;
		target->type->remove_breakpoint = dpm_remove_breakpoint;
	}

	/* watchpoint setup */
	target->type->add_watchpoint = dpm_add_watchpoint;
	target->type->remove_watchpoint = dpm_remove_watchpoint;

	/* FIXME add vector catch support */

	dpm->nbp = 1 + ((dpm->didr >> 24) & 0xf);
	dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);

	dpm->nwp = 1 + ((dpm->didr >> 28) & 0xf);
	dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);

	if (!dpm->dbp || !dpm->dwp) {
		free(dpm->dbp);
		free(dpm->dwp);
		return ERROR_FAIL;
	}

	LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target), dpm->nbp, dpm->nwp);

	/* REVISIT ... and some of those breakpoints could match
	 * execution context IDs...
	 */

	return ERROR_OK;
}
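
/* NOTE: the breakpoint and watchpoint counts above come straight from DIDR:
 * bits 27:24 hold the number of breakpoint register pairs minus one, and
 * bits 31:28 the number of watchpoint register pairs minus one.
 */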

/**
 * Reinitializes DPM state at the beginning of a new debug session
 * or after a reset which may have affected the debug module.
 */
int arm_dpm_initialize(struct arm_dpm *dpm)
{
	/* Disable all breakpoints and watchpoints at startup. */
	if (dpm->bpwp_disable) {
		unsigned i;

		for (i = 0; i < dpm->nbp; i++) {
			dpm->dbp[i].bpwp.number = i;
			(void) dpm->bpwp_disable(dpm, i);
		}
		for (i = 0; i < dpm->nwp; i++) {
			dpm->dwp[i].bpwp.number = 16 + i;
			(void) dpm->bpwp_disable(dpm, 16 + i);
		}
	} else
		LOG_WARNING("%s: can't disable breakpoints and watchpoints",
				target_name(dpm->arm->target));

	return ERROR_OK;
}