/*
 * Copyright (C) 2009 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include "arm.h"
#include "arm_dpm.h"
#include <jtag/jtag.h>
#include "register.h"
#include "breakpoints.h"
#include "target_type.h"
#include "arm_opcodes.h"
/*
 * Implements various ARM DPM operations using architectural debug registers.
 * These routines layer over core-specific communication methods to cope with
 * implementation differences between cores like ARM1136 and Cortex-A8.
 *
 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B).  In OpenOCD, DPM operations
 * are abstracted through internal programming interfaces to share code and
 * to minimize needless differences in debug behavior between cores.
 */
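/*
 * Illustrative sketch (an assumption, not a verbatim driver excerpt): a
 * core-specific driver supplies the struct arm_dpm hooks used throughout
 * this file -- prepare(), finish(), instr_read_data_r0()/instr_write_data_r0(),
 * instr_read_data_dcc()/instr_write_data_dcc(), the optional
 * instr_cpsr_sync(), and bpwp_enable()/bpwp_disable() -- and then registers
 * the DPM, roughly:
 *
 *    dpm->arm = arm;                          // its struct arm
 *    dpm->didr = didr;                        // debug ID register value
 *    dpm->prepare = my_dpm_prepare;           // hypothetical callbacks
 *    dpm->finish = my_dpm_finish;
 *    dpm->instr_read_data_r0 = my_instr_read_r0;
 *    dpm->instr_write_data_r0 = my_instr_write_r0;
 *    retval = arm_dpm_setup(dpm);             // call only once
 *    ...
 *    retval = arm_dpm_initialize(dpm);        // per debug session / reset
 */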
/*----------------------------------------------------------------------*/

/* Read coprocessor */
static int dpm_mrc(struct target *target, int cpnum,
        uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
        uint32_t *value)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
            (int) op1, (int) CRn,
            (int) CRm, (int) op2);

    /* read coprocessor register into R0; return via DCC */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
            value);

    /* (void) */ dpm->finish(dpm);
    return retval;
}
static int dpm_mcr(struct target *target, int cpnum,
        uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
        uint32_t value)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
            (int) op1, (int) CRn,
            (int) CRm, (int) op2);

    /* read DCC into r0; then write coprocessor register from R0 */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
            value);

    /* (void) */ dpm->finish(dpm);
    return retval;
}
/*----------------------------------------------------------------------*/

/*
 * Register access utilities
 */

/* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
 * Routines *must* restore the original mode before returning!!
 */
int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
{
    int retval;
    uint32_t cpsr;

    /* restore previous mode */
    if (mode == ARM_MODE_ANY)
        cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);

    /* else force to the specified mode */
    else
        cpsr = mode;

    retval = dpm->instr_write_data_r0(dpm, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr);
    if (retval != ERROR_OK)
        return retval;

    if (dpm->instr_cpsr_sync)
        retval = dpm->instr_cpsr_sync(dpm);

    return retval;
}
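/* Illustrative usage (an assumption, mirroring arm_dpm_read_core_reg() below):
 * callers bracket banked-register access with a temporary mode switch and
 * always restore the recorded CPSR afterwards:
 *
 *    retval = dpm_modeswitch(dpm, ARM_MODE_FIQ);    // temporary mode
 *    ... access the banked registers ...
 *    dpm_modeswitch(dpm, ARM_MODE_ANY);             // restore recorded mode
 */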
/* just read the register -- rely on the core mode being right */
static int dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
    uint32_t value;
    int retval;

    switch (regnum) {
    case 0 ... 14:
        /* return via DCC:  "MCR p14, 0, Rnum, c0, c5, 0" */
        retval = dpm->instr_read_data_dcc(dpm,
                ARMV4_5_MCR(14, 0, regnum, 0, 5, 0),
                &value);
        break;
    case 15:    /* PC
         * "MOV r0, pc"; then return via DCC */
        retval = dpm->instr_read_data_r0(dpm, 0xe1a0000f, &value);

        /* NOTE: this seems like a slightly awkward place to update
         * this value ... but if the PC gets written (the only way
         * to change what we compute), the arch spec says subsequent
         * reads return values which are "unpredictable".  So this
         * is always right except in those broken-by-intent cases.
         */
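        /* Background (standard ARM pipeline behavior): a "MOV r0, pc" reads
         * the address of the current instruction plus 8 in ARM state and
         * plus 4 in Thumb state; the state-specific adjustments below undo
         * that offset to recover the instruction's own address.
         */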
        switch (dpm->arm->core_state) {
        case ARM_STATE_ARM:
            value -= 8;
            break;
        case ARM_STATE_THUMB:
        case ARM_STATE_THUMB_EE:
            value -= 4;
            break;
        case ARM_STATE_JAZELLE:
            /* core-specific ... ? */
            LOG_WARNING("Jazelle PC adjustment unknown");
            break;
        }
        break;
    default:
        /* 16: "MRS r0, CPSR"; then return via DCC
         * 17: "MRS r0, SPSR"; then return via DCC
         */
        retval = dpm->instr_read_data_r0(dpm,
                ARMV4_5_MRS(0, regnum & 1),
                &value);
        break;
    }

    if (retval == ERROR_OK) {
        buf_set_u32(r->value, 0, 32, value);
        LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned) value);
    }

    return retval;
}
/* just write the register -- rely on the core mode being right */
static int dpm_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
    int retval;
    uint32_t value = buf_get_u32(r->value, 0, 32);

    switch (regnum) {
    case 0 ... 14:
        /* load register from DCC:  "MRC p14, 0, Rnum, c0, c5, 0" */
        retval = dpm->instr_write_data_dcc(dpm,
                ARMV4_5_MRC(14, 0, regnum, 0, 5, 0),
                value);
        break;
    case 15:    /* PC
         * read r0 from DCC; then "MOV pc, r0" */
        retval = dpm->instr_write_data_r0(dpm, 0xe1a0f000, value);
        break;
    default:
        /* 16: read r0 from DCC, then "MSR CPSR_cxsf, r0"
         * 17: read r0 from DCC, then "MSR SPSR_cxsf, r0"
         */
        retval = dpm->instr_write_data_r0(dpm,
                ARMV4_5_MSR_GP(0, 0xf, regnum & 1),
                value);
        if (retval != ERROR_OK)
            return retval;

        if (regnum == 16 && dpm->instr_cpsr_sync)
            retval = dpm->instr_cpsr_sync(dpm);
        break;
    }

    if (retval == ERROR_OK) {
        LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned) value);
    }

    return retval;
}
/**
 * Read basic registers of the current context:  R0 to R15, and CPSR;
 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
 * In normal operation this is called on entry to halting debug state,
 * possibly after some other operations supporting restore of debug state
 * or making sure the CPU is fully idle (drain write buffer, etc).
 */
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
{
    struct arm *arm = dpm->arm;
    uint32_t cpsr;
    int retval;
    struct reg *r;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    /* read R0 first (it's used for scratch), then CPSR */
    r = arm->core_cache->reg_list + 0;
    retval = dpm_read_reg(dpm, r, 0);
    if (retval != ERROR_OK)
        goto fail;

    retval = dpm->instr_read_data_r0(dpm, ARMV4_5_MRS(0, 0), &cpsr);
    if (retval != ERROR_OK)
        goto fail;

    /* update core mode and state, plus shadow mapping for R8..R14 */
    arm_set_cpsr(arm, cpsr);

    /* REVISIT we can probably avoid reading R1..R14, saving time... */
    for (unsigned i = 1; i < 16; i++) {
        r = arm_reg_current(arm, i);

        retval = dpm_read_reg(dpm, r, i);
        if (retval != ERROR_OK)
            goto fail;
    }

    /* NOTE: SPSR ignored (if it's even relevant). */

    /* REVISIT the debugger can trigger various exceptions.  See the
     * ARMv7A architecture spec, section C5.7, for more info about
     * what defenses are needed; v6 debug has the most issues.
     */

fail:
    /* (void) */ dpm->finish(dpm);
    return retval;
}
/* Avoid needless I/O ... leave breakpoints and watchpoints alone
 * unless they're removed, or need updating because of single-stepping
 * or running debugger code.
 */
static int dpm_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
        struct dpm_bpwp *xp, int *set_p)
{
    int retval = ERROR_OK;
    bool disable;

    if (!set_p) {
        if (!xp->dirty)
            goto done;
        xp->dirty = false;
        /* removed or startup; we must disable it */
        disable = true;
    } else if (bpwp) {
        if (!xp->dirty)
            goto done;
        /* disabled, but we must set it */
        xp->dirty = disable = false;
    } else {
        if (!*set_p)
            goto done;
        /* set, but we must temporarily disable it */
        xp->dirty = disable = true;
    }

    if (disable)
        retval = dpm->bpwp_disable(dpm, xp->number);
    else
        retval = dpm->bpwp_enable(dpm, xp->number,
                xp->address, xp->control);

    if (retval != ERROR_OK)
        LOG_ERROR("%s: can't %s HW %spoint %d",
            target_name(dpm->arm->target),
            disable ? "disable" : "enable",
            (xp->number < 16) ? "break" : "watch",
            xp->number);

done:
    return retval;
}
static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp);
/**
 * Writes all modified core registers for all processor modes.  In normal
 * operation this is called on exit from halting debug state.
 *
 * @param dpm: represents the processor
 * @param bpwp: true ensures breakpoints and watchpoints are set,
 *	false ensures they are cleared
 */
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
{
    struct arm *arm = dpm->arm;
    struct reg_cache *cache = arm->core_cache;
    int retval;
    bool did_write;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;
    /* If we're managing hardware breakpoints for this core, enable
     * or disable them as requested.
     *
     * REVISIT We don't yet manage them for ANY cores.  Eventually
     * we should be able to assume we handle them; but until then,
     * cope with the hand-crafted breakpoint code.
     */
    if (arm->target->type->add_breakpoint == dpm_add_breakpoint) {
        for (unsigned i = 0; i < dpm->nbp; i++) {
            struct dpm_bp *dbp = dpm->dbp + i;
            struct breakpoint *bp = dbp->bp;

            retval = dpm_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
                    bp ? &bp->set : NULL);
            if (retval != ERROR_OK)
                goto done;
        }
    }

    /* enable/disable watchpoints */
    for (unsigned i = 0; i < dpm->nwp; i++) {
        struct dpm_wp *dwp = dpm->dwp + i;
        struct watchpoint *wp = dwp->wp;

        retval = dpm_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
                wp ? &wp->set : NULL);
        if (retval != ERROR_OK)
            goto done;
    }

    /* NOTE: writes to breakpoint and watchpoint registers might
     * be queued, and need (efficient/batched) flushing later.
     */

    /* Scan the registers until we find one that's both dirty and
     * eligible for flushing.  Flush that and everything else that
     * shares the same core mode setting.  Typically this won't
     * actually find anything to do...
     */
    do {
        enum arm_mode mode = ARM_MODE_ANY;

        did_write = false;

        /* check everything except our scratch register R0 */
        for (unsigned i = 1; i < cache->num_regs; i++) {
            struct arm_reg *r;
            unsigned regnum;

            /* also skip PC, CPSR, and non-dirty */
            if (i == 15)
                continue;
            if (arm->cpsr == cache->reg_list + i)
                continue;
            if (!cache->reg_list[i].dirty)
                continue;

            r = cache->reg_list[i].arch_info;
            regnum = r->num;

            /* may need to pick and set a mode */
            if (!did_write) {
                enum arm_mode tmode;

                did_write = true;
                mode = tmode = r->mode;

                /* cope with special cases */
                switch (regnum) {
                case 8 ... 12:
                    /* r8..r12 "anything but FIQ" case;
                     * we "know" core mode is accurate
                     * since we haven't changed it yet
                     */
                    if (arm->core_mode == ARM_MODE_FIQ
                            && mode != ARM_MODE_FIQ)
                        tmode = ARM_MODE_USR;
                    break;
                }

                /* REVISIT error checks */
                if (tmode != ARM_MODE_ANY) {
                    retval = dpm_modeswitch(dpm, tmode);
                    if (retval != ERROR_OK)
                        goto done;
                }
            }
            if (r->mode != mode)
                continue;

            retval = dpm_write_reg(dpm,
                    &cache->reg_list[i],
                    regnum);
            if (retval != ERROR_OK)
                goto done;
        }
    } while (did_write);
    /* Restore original CPSR ... assuming either that we changed it,
     * or it's dirty.  Must write PC to ensure the return address is
     * defined, and must not write it before CPSR.
     */
    retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
    if (retval != ERROR_OK)
        goto done;
    arm->cpsr->dirty = false;

    retval = dpm_write_reg(dpm, arm->pc, 15);
    if (retval != ERROR_OK)
        goto done;
    arm->pc->dirty = false;

    /* flush R0 -- it's *very* dirty by now */
    retval = dpm_write_reg(dpm, &cache->reg_list[0], 0);
    if (retval != ERROR_OK)
        goto done;
    cache->reg_list[0].dirty = false;

    /* (void) */ dpm->finish(dpm);

done:
    return retval;
}
/* Returns ARM_MODE_ANY or temporary mode to use while reading the
 * specified register ... works around flakiness from ARM core calls.
 * Caller already filtered out SPSR access; mode is never MODE_SYS
 * or MODE_ANY.
 */
static enum arm_mode dpm_mapmode(struct arm *arm,
        unsigned num, enum arm_mode mode)
{
    enum arm_mode amode = arm->core_mode;

    /* don't switch if the mode is already correct */
    if (amode == ARM_MODE_SYS)
        amode = ARM_MODE_USR;
    if (mode == amode)
        return ARM_MODE_ANY;

    switch (num) {
    /* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
    case 0 ... 7:
    case 15:
    case 16:
        break;
    /* r8..r12 aren't shadowed for anything except FIQ */
    case 8 ... 12:
        if (mode == ARM_MODE_FIQ)
            return mode;
        break;
    /* r13/sp, and r14/lr are always shadowed */
    case 13:
    case 14:
        return mode;
    default:
        LOG_WARNING("invalid register #%u", num);
        break;
    }
    return ARM_MODE_ANY;
}
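/* For example (illustrative): reading r3, the PC, or the CPSR never forces
 * a mode switch; reading the IRQ-banked r13/sp while halted in SVC returns
 * ARM_MODE_IRQ as the temporary mode; and r8..r12 only force a switch when
 * the FIQ-banked copies are involved.
 */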
/*
 * Standard ARM register accessors ... there are three methods
 * in "struct arm", to support individual read/write and bulk read
 * of registers.
 */

static int arm_dpm_read_core_reg(struct target *target, struct reg *r,
        int regnum, enum arm_mode mode)
{
    struct arm_dpm *dpm = target_to_arm(target)->dpm;
    int retval;

    if (regnum < 0 || regnum > 16)
        return ERROR_COMMAND_SYNTAX_ERROR;

    if (regnum == 16) {
        if (mode != ARM_MODE_ANY)
            regnum = 17;
    } else
        mode = dpm_mapmode(dpm->arm, regnum, mode);

    /* REVISIT what happens if we try to read SPSR in a core mode
     * which has no such register?
     */

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    if (mode != ARM_MODE_ANY) {
        retval = dpm_modeswitch(dpm, mode);
        if (retval != ERROR_OK)
            goto fail;
    }

    retval = dpm_read_reg(dpm, r, regnum);
    if (retval != ERROR_OK)
        goto fail;

    /* always clean up, regardless of error */
fail:
    if (mode != ARM_MODE_ANY)
        /* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

    /* (void) */ dpm->finish(dpm);
    return retval;
}
static int arm_dpm_write_core_reg(struct target *target, struct reg *r,
        int regnum, enum arm_mode mode, uint32_t value)
{
    struct arm_dpm *dpm = target_to_arm(target)->dpm;
    int retval;

    if (regnum < 0 || regnum > 16)
        return ERROR_COMMAND_SYNTAX_ERROR;

    if (regnum == 16) {
        if (mode != ARM_MODE_ANY)
            regnum = 17;
    } else
        mode = dpm_mapmode(dpm->arm, regnum, mode);

    /* REVISIT what happens if we try to write SPSR in a core mode
     * which has no such register?
     */

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    if (mode != ARM_MODE_ANY) {
        retval = dpm_modeswitch(dpm, mode);
        if (retval != ERROR_OK)
            goto fail;
    }

    retval = dpm_write_reg(dpm, r, regnum);

    /* always clean up, regardless of error */
fail:
    if (mode != ARM_MODE_ANY)
        /* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

    /* (void) */ dpm->finish(dpm);
    return retval;
}
static int arm_dpm_full_context(struct target *target)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    struct reg_cache *cache = arm->core_cache;
    int retval;
    bool did_read;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    do {
        enum arm_mode mode = ARM_MODE_ANY;

        did_read = false;

        /* We "know" arm_dpm_read_current_registers() was called so
         * the unmapped registers (R0..R7, PC, AND CPSR) and some
         * view of R8..R14 are current.  We also "know" oddities of
         * register mapping: special cases for R8..R12 and SPSR.
         *
         * Pick some mode with unread registers and read them all.
         */
        for (unsigned i = 0; i < cache->num_regs; i++) {
            struct arm_reg *r;

            if (cache->reg_list[i].valid)
                continue;
            r = cache->reg_list[i].arch_info;

            /* may need to pick a mode and set CPSR */
            if (!did_read) {
                did_read = true;
                mode = r->mode;

                /* For R8..R12 when we've entered debug
                 * state in FIQ mode... patch mode.
                 */
                if (mode == ARM_MODE_ANY)
                    mode = ARM_MODE_USR;

                /* REVISIT error checks */
                retval = dpm_modeswitch(dpm, mode);
                if (retval != ERROR_OK)
                    goto done;
            }
            if (r->mode != mode)
                continue;

            /* CPSR was read, so "R16" must mean SPSR */
            retval = dpm_read_reg(dpm,
                    &cache->reg_list[i],
                    (r->num == 16) ? 17 : r->num);
            if (retval != ERROR_OK)
                goto done;
        }
    } while (did_read);

    retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
    /* (void) */ dpm->finish(dpm);

done:
    return retval;
}
/*----------------------------------------------------------------------*/

/*
 * Breakpoint and Watchpoint support.
 *
 * Hardware {break,watch}points are usually left active, to minimize
 * debug entry/exit costs.  When they are set or cleared, it's done in
 * batches.  Also, DPM-conformant hardware can update debug registers
 * regardless of whether the CPU is running or halted ... though that
 * fact isn't currently leveraged.
 */
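/* Note on the batching mentioned above: the add/remove handlers below only
 * record the desired state and mark the dpm_bpwp entry dirty; the debug
 * registers themselves are written later, from
 * arm_dpm_write_dirty_registers() via dpm_maybe_update_bpwp().
 */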
static int dpm_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
        uint32_t addr, uint32_t length)
{
    uint32_t control;

    control = (1 << 0)      /* enable */
        | (3 << 1);         /* both user and privileged access */

    /* Match 1, 2, or all 4 byte addresses in this word.
     *
     * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
     * Support larger length, when addr is suitably aligned.  In
     * particular, allow watchpoints on 8 byte "double" values.
     *
     * REVISIT allow watchpoints on unaligned 2-byte values; and on
     * v7 hardware, unaligned 4-byte ones too.
     */
    switch (length) {
    case 1:
        control |= (1 << (addr & 3)) << 5;
        break;
    case 2:
        /* require 2-byte alignment */
        if (!(addr & 1)) {
            control |= (3 << (addr & 2)) << 5;
            break;
        }
        /* FALL THROUGH */
    case 4:
        /* require 4-byte alignment */
        if (!(addr & 3)) {
            control |= 0xf << 5;
            break;
        }
        /* FALL THROUGH */
    default:
        LOG_ERROR("unsupported {break,watch}point length/alignment");
        return ERROR_COMMAND_SYNTAX_ERROR;
    }
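    /* Worked example (illustrative): a 2-byte watchpoint at address 0x1002
     * selects bytes 2..3 of that word: (3 << (0x1002 & 2)) << 5 == 0xc << 5,
     * i.e. byte-address-select bits [8:7] are set in the control value.
     */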
    /* other shared control bits:
     * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
     * bit 20 == 0 ... not linked to a context ID
     * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
     */

    xp->address = addr & ~3;
    xp->control = control;
    xp->dirty = true;

    LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
            xp->address, control, xp->number);

    /* hardware is updated in write_dirty_registers() */
    return ERROR_OK;
}
static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    if (bp->length < 2)
        return ERROR_COMMAND_SYNTAX_ERROR;
    if (!dpm->bpwp_enable)
        return retval;

    /* FIXME we need a generic solution for software breakpoints. */
    if (bp->type == BKPT_SOFT)
        LOG_DEBUG("using HW bkpt, not SW...");

    for (unsigned i = 0; i < dpm->nbp; i++) {
        if (!dpm->dbp[i].bp) {
            retval = dpm_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
                    bp->address, bp->length);
            if (retval == ERROR_OK)
                dpm->dbp[i].bp = bp;
            break;
        }
    }

    return retval;
}
static int dpm_remove_breakpoint(struct target *target, struct breakpoint *bp)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval = ERROR_COMMAND_SYNTAX_ERROR;

    for (unsigned i = 0; i < dpm->nbp; i++) {
        if (dpm->dbp[i].bp == bp) {
            dpm->dbp[i].bp = NULL;
            dpm->dbp[i].bpwp.dirty = true;

            /* hardware is updated in write_dirty_registers() */
            retval = ERROR_OK;
            break;
        }
    }

    return retval;
}
static int dpm_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
        struct watchpoint *wp)
{
    int retval;
    struct dpm_wp *dwp = dpm->dwp + index_t;
    uint32_t control;

    /* this hardware doesn't support data value matching or masking */
    if (wp->value || wp->mask != ~(uint32_t)0) {
        LOG_DEBUG("watchpoint values and masking not supported");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
    }

    retval = dpm_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
    if (retval != ERROR_OK)
        return retval;

    control = dwp->bpwp.control;
    switch (wp->rw) {
    case WPT_READ:
        control |= 1 << 3;
        break;
    case WPT_WRITE:
        control |= 2 << 3;
        break;
    case WPT_ACCESS:
        control |= 3 << 3;
        break;
    }
    dwp->bpwp.control = control;

    dpm->dwp[index_t].wp = wp;

    return ERROR_OK;
}
static int dpm_add_watchpoint(struct target *target, struct watchpoint *wp)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    if (dpm->bpwp_enable) {
        for (unsigned i = 0; i < dpm->nwp; i++) {
            if (!dpm->dwp[i].wp) {
                retval = dpm_watchpoint_setup(dpm, i, wp);
                break;
            }
        }
    }

    return retval;
}
static int dpm_remove_watchpoint(struct target *target, struct watchpoint *wp)
{
    struct arm *arm = target_to_arm(target);
    struct arm_dpm *dpm = arm->dpm;
    int retval = ERROR_COMMAND_SYNTAX_ERROR;

    for (unsigned i = 0; i < dpm->nwp; i++) {
        if (dpm->dwp[i].wp == wp) {
            dpm->dwp[i].wp = NULL;
            dpm->dwp[i].bpwp.dirty = true;

            /* hardware is updated in write_dirty_registers() */
            retval = ERROR_OK;
            break;
        }
    }

    return retval;
}
void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
{
    switch (dpm->arm->core_state) {
    case ARM_STATE_ARM:
        addr -= 8;
        break;
    case ARM_STATE_THUMB:
    case ARM_STATE_THUMB_EE:
        addr -= 4;
        break;
    case ARM_STATE_JAZELLE:
        /* ?? */
        break;
    }
    dpm->wp_pc = addr;
}
/*----------------------------------------------------------------------*/

/*
 * Other debug and support utilities
 */

void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
{
    struct target *target = dpm->arm->target;

    /* Examine debug reason */
    switch (DSCR_ENTRY(dscr)) {
    case 6:     /* Data abort (v6 only) */
    case 7:     /* Prefetch abort (v6 only) */
        /* FALL THROUGH -- assume a v6 core in abort mode */
    case 0:     /* HALT request from debugger */
        target->debug_reason = DBG_REASON_DBGRQ;
        break;
    case 1:     /* HW breakpoint */
    case 3:     /* SW BKPT */
    case 5:     /* vector catch */
        target->debug_reason = DBG_REASON_BREAKPOINT;
        break;
    case 2:     /* asynch watchpoint */
    case 10:    /* precise watchpoint */
        target->debug_reason = DBG_REASON_WATCHPOINT;
        break;
    default:
        target->debug_reason = DBG_REASON_UNDEFINED;
        break;
    }
}
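/* Note: DSCR_ENTRY() extracts the DSCR "method of debug entry" field
 * (bits [5:2] on ARMv6/ARMv7, per ARM DDI 0406); the numeric cases above
 * follow that encoding.
 */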
/*----------------------------------------------------------------------*/

/*
 * Setup and management support.
 */

/**
 * Hooks up this DPM to its associated target; call only once.
 * Initially this only covers the register cache.
 *
 * Oh, and watchpoints.  Yeah.
 */
int arm_dpm_setup(struct arm_dpm *dpm)
{
    struct arm *arm = dpm->arm;
    struct target *target = arm->target;
    struct reg_cache *cache;

    arm->dpm = dpm;

    /* register access setup */
    arm->full_context = arm_dpm_full_context;
    arm->read_core_reg = arm_dpm_read_core_reg;
    arm->write_core_reg = arm_dpm_write_core_reg;

    cache = arm_build_reg_cache(target, arm);
    if (!cache)
        return ERROR_FAIL;

    *register_get_last_cache_p(&target->reg_cache) = cache;

    /* coprocessor access setup */
    arm->mrc = dpm_mrc;
    arm->mcr = dpm_mcr;

    /* breakpoint setup -- optional until it works everywhere */
    if (!target->type->add_breakpoint) {
        target->type->add_breakpoint = dpm_add_breakpoint;
        target->type->remove_breakpoint = dpm_remove_breakpoint;
    }

    /* watchpoint setup */
    target->type->add_watchpoint = dpm_add_watchpoint;
    target->type->remove_watchpoint = dpm_remove_watchpoint;

    /* FIXME add vector catch support */

    dpm->nbp = 1 + ((dpm->didr >> 24) & 0xf);
    dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);

    dpm->nwp = 1 + ((dpm->didr >> 28) & 0xf);
    dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
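    /* For example (illustrative arithmetic): a DIDR whose bits [27:24] read
     * as 5 and whose bits [31:28] read as 3 describes 6 hardware breakpoints
     * and 4 watchpoints, since both fields encode "count minus one".
     */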
    if (!dpm->dbp || !dpm->dwp) {
        free(dpm->dbp);
        free(dpm->dwp);
        return ERROR_FAIL;
    }

    LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
            target_name(target), dpm->nbp, dpm->nwp);

    /* REVISIT ... and some of those breakpoints could match
     * execution context IDs...
     */

    return ERROR_OK;
}
/**
 * Reinitializes DPM state at the beginning of a new debug session
 * or after a reset which may have affected the debug module.
 */
int arm_dpm_initialize(struct arm_dpm *dpm)
{
    /* Disable all breakpoints and watchpoints at startup. */
    if (dpm->bpwp_disable) {
        unsigned i;

        for (i = 0; i < dpm->nbp; i++) {
            dpm->dbp[i].bpwp.number = i;
            (void) dpm->bpwp_disable(dpm, i);
        }
        for (i = 0; i < dpm->nwp; i++) {
            dpm->dwp[i].bpwp.number = 16 + i;
            (void) dpm->bpwp_disable(dpm, 16 + i);
        }
    } else
        LOG_WARNING("%s: can't disable breakpoints and watchpoints",
            target_name(dpm->arm->target));

    return ERROR_OK;
}