// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Copyright (C) 2009 by David Brownell
 */

#include "arm.h"
#include "arm_dpm.h"
#include "armv8_dpm.h"
#include <jtag/jtag.h>
#include "register.h"
#include "breakpoints.h"
#include "target_type.h"
#include "arm_opcodes.h"
/**
 * @file
 * Implements various ARM DPM operations using architectural debug registers.
 * These routines layer over core-specific communication methods to cope with
 * implementation differences between cores like ARM1136 and Cortex-A8.
 *
 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B).  In OpenOCD, DPM operations
 * are abstracted through internal programming interfaces to share code and
 * to minimize needless differences in debug behavior between cores.
 */

/*----------------------------------------------------------------------*/
/* Read coprocessor */
static int dpm_mrc(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm,
	uint32_t *value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) crn,
		(int) crm, (int) op2);

	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
		ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2),
		value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
static int dpm_mcr(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm,
	uint32_t value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) crn,
		(int) crm, (int) op2);

	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
		ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2),
		value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
/*----------------------------------------------------------------------*/

/*
 * Register access utilities
 */

/* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
 * Routines *must* restore the original mode before returning!!
 */
int arm_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
{
	int retval;
	uint32_t cpsr;

	/* restore previous mode */
	if (mode == ARM_MODE_ANY)
		cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);

	/* else force to the specified mode */
	else
		cpsr = mode;

	retval = dpm->instr_write_data_r0(dpm, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr);
	if (retval != ERROR_OK)
		return retval;

	if (dpm->instr_cpsr_sync)
		retval = dpm->instr_cpsr_sync(dpm);

	return retval;
}
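
/* Usage note: callers normally bracket banked-register accesses with this,
 * e.g. arm_dpm_modeswitch(dpm, ARM_MODE_FIQ) before touching the FIQ shadow
 * registers, then arm_dpm_modeswitch(dpm, ARM_MODE_ANY) to restore the
 * recorded CPSR, as the accessors later in this file do.
 */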
/* Read 64bit VFP registers */
static int dpm_read_reg_u64(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval = ERROR_FAIL;
	uint32_t value_r0, value_r1;

	switch (regnum) {
	case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
		/* move from double word register to r0:r1: "vmov r0, r1, vm"
		 * then read r0 via dcc
		 */
		retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_VMOV(1, 1, 0, ((regnum - ARM_VFP_V3_D0) >> 4),
				((regnum - ARM_VFP_V3_D0) & 0xf)), &value_r0);
		if (retval != ERROR_OK)
			break;

		/* read r1 via dcc */
		retval = dpm->instr_read_data_dcc(dpm,
			ARMV4_5_MCR(14, 0, 1, 0, 5, 0),
			&value_r1);
		break;
	default:
		break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value_r0);
		buf_set_u32(r->value + 4, 0, 32, value_r1);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x, %8.8x", r->name,
			(unsigned) value_r0, (unsigned) value_r1);
	}

	return retval;
}
/* just read the register -- rely on the core mode being right */
int arm_dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	uint32_t value;
	int retval;

	switch (regnum) {
	case 0 ... 14:
		/* return via DCC:  "MCR p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_read_data_dcc(dpm,
			ARMV4_5_MCR(14, 0, regnum, 0, 5, 0),
			&value);
		break;
	case 15:	/* PC
			 * "MOV r0, pc"; then return via DCC */
		retval = dpm->instr_read_data_r0(dpm, 0xe1a0000f, &value);

		/* NOTE: this seems like a slightly awkward place to update
		 * this value ... but if the PC gets written (the only way
		 * to change what we compute), the arch spec says subsequent
		 * reads return values which are "unpredictable".  So this
		 * is always right except in those broken-by-intent cases.
		 */
		switch (dpm->arm->core_state) {
		case ARM_STATE_ARM:
			value -= 8;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			value -= 4;
			break;
		case ARM_STATE_JAZELLE:
			/* core-specific ... ? */
			LOG_WARNING("Jazelle PC adjustment unknown");
			break;
		default:
			LOG_WARNING("unknown core state");
			break;
		}
		break;
	case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
		return dpm_read_reg_u64(dpm, r, regnum);
	case ARM_VFP_V3_FPSCR:
		/* "VMRS r0, FPSCR"; then return via DCC */
		retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_VMRS(0), &value);
		break;
	default:
		/* 16: "MRS r0, CPSR"; then return via DCC
		 * 17: "MRS r0, SPSR"; then return via DCC
		 */
		retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRS(0, regnum & 1),
			&value);
		break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}
/* Write 64bit VFP registers */
static int dpm_write_reg_u64(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval = ERROR_FAIL;
	uint32_t value_r0 = buf_get_u32(r->value, 0, 32);
	uint32_t value_r1 = buf_get_u32(r->value + 4, 0, 32);

	switch (regnum) {
	case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
		/* write value_r1 to r1 via dcc */
		retval = dpm->instr_write_data_dcc(dpm,
			ARMV4_5_MRC(14, 0, 1, 0, 5, 0),
			value_r1);
		if (retval != ERROR_OK)
			break;

		/* write value_r0 to r0 via dcc then,
		 * move to double word register from r0:r1: "vmov vm, r0, r1"
		 */
		retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_VMOV(0, 1, 0, ((regnum - ARM_VFP_V3_D0) >> 4),
				((regnum - ARM_VFP_V3_D0) & 0xf)), value_r0);
		break;
	default:
		break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x, %8.8x", r->name,
			(unsigned) value_r0, (unsigned) value_r1);
	}

	return retval;
}
/* just write the register -- rely on the core mode being right */
static int dpm_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval;
	uint32_t value = buf_get_u32(r->value, 0, 32);

	switch (regnum) {
	case 0 ... 14:
		/* load register from DCC:  "MRC p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_write_data_dcc(dpm,
			ARMV4_5_MRC(14, 0, regnum, 0, 5, 0),
			value);
		break;
	case 15:	/* PC
			 * read r0 from DCC; then "MOV pc, r0" */
		retval = dpm->instr_write_data_r0(dpm, 0xe1a0f000, value);
		break;
	case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
		return dpm_write_reg_u64(dpm, r, regnum);
	case ARM_VFP_V3_FPSCR:
		/* move to r0 from DCC, then "VMSR FPSCR, r0" */
		retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_VMSR(0), value);
		break;
	default:
		/* 16: read r0 from DCC, then "MSR r0, CPSR_cxsf"
		 * 17: read r0 from DCC, then "MSR r0, SPSR_cxsf"
		 */
		retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MSR_GP(0, 0xf, regnum & 1),
			value);
		if (retval != ERROR_OK)
			break;

		if (regnum == 16 && dpm->instr_cpsr_sync)
			retval = dpm->instr_cpsr_sync(dpm);

		break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}
/*
 * Write to program counter and switch the core state (arm/thumb) according to
 * the address.
 */
static int dpm_write_pc_core_state(struct arm_dpm *dpm, struct reg *r)
{
	uint32_t value = buf_get_u32(r->value, 0, 32);

	/* read r0 from DCC; then "BX r0" */
	return dpm->instr_write_data_r0(dpm, ARMV4_5_BX(0), value);
}
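
/* NOTE: BX r0 selects the new core state from bit 0 of the address (set for
 * Thumb, clear for ARM), so whoever computed the resume address also picks
 * the state the core will run in.
 */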
/**
 * Read basic registers of the current context:  R0 to R15, and CPSR;
 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
 * In normal operation this is called on entry to halting debug state,
 * possibly after some other operations supporting restore of debug state
 * or making sure the CPU is fully idle (drain write buffer, etc).
 */
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	uint32_t cpsr;
	int retval;
	struct reg *r;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* read R0 and R1 first (it's used for scratch), then CPSR */
	for (unsigned i = 0; i < 2; i++) {
		r = arm->core_cache->reg_list + i;
		if (!r->valid) {
			retval = arm_dpm_read_reg(dpm, r, i);
			if (retval != ERROR_OK)
				goto fail;
		}
		r->dirty = true;
	}

	retval = dpm->instr_read_data_r0(dpm, ARMV4_5_MRS(0, 0), &cpsr);
	if (retval != ERROR_OK)
		goto fail;

	/* update core mode and state, plus shadow mapping for R8..R14 */
	arm_set_cpsr(arm, cpsr);

	/* REVISIT we can probably avoid reading R1..R14, saving time... */
	for (unsigned i = 2; i < 16; i++) {
		r = arm_reg_current(arm, i);
		if (r->valid)
			continue;

		retval = arm_dpm_read_reg(dpm, r, i);
		if (retval != ERROR_OK)
			goto fail;
	}

	/* NOTE: SPSR ignored (if it's even relevant). */

	/* REVISIT the debugger can trigger various exceptions.  See the
	 * ARMv7A architecture spec, section C5.7, for more info about
	 * what defenses are needed; v6 debug has the most issues.
	 */

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}
/* Avoid needless I/O ... leave breakpoints and watchpoints alone
 * unless they're removed, or need updating because of single-stepping
 * or running debugger code.
 */
static int dpm_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
	struct dpm_bpwp *xp, bool *set_p)
{
	int retval = ERROR_OK;
	bool disable;

	if (!set_p) {
		if (!xp->dirty)
			goto done;
		xp->dirty = false;
		/* removed or startup; we must disable it */
		disable = true;
	} else if (bpwp) {
		if (!xp->dirty)
			goto done;
		/* disabled, but we must set it */
		xp->dirty = disable = false;
	} else {
		if (!*set_p)
			goto done;
		/* set, but we must temporarily disable it */
		xp->dirty = disable = true;
	}

	if (disable)
		retval = dpm->bpwp_disable(dpm, xp->number);
	else
		retval = dpm->bpwp_enable(dpm, xp->number,
			xp->address, xp->control);

	if (retval != ERROR_OK)
		LOG_ERROR("%s: can't %s HW %spoint %d",
			target_name(dpm->arm->target),
			disable ? "disable" : "enable",
			(xp->number < 16) ? "break" : "watch",
			xp->number & 0xf);
done:
	return retval;
}
static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp);
/**
 * Writes all modified core registers for all processor modes.  In normal
 * operation this is called on exit from halting debug state.
 *
 * @param dpm: represents the processor
 * @param bpwp: true ensures breakpoints and watchpoints are set,
 *	false ensures they are cleared
 */
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
{
	struct arm *arm = dpm->arm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_write;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* If we're managing hardware breakpoints for this core, enable
	 * or disable them as requested.
	 *
	 * REVISIT We don't yet manage them for ANY cores.  Eventually
	 * we should be able to assume we handle them; but until then,
	 * cope with the hand-crafted breakpoint code.
	 */
	if (arm->target->type->add_breakpoint == dpm_add_breakpoint) {
		for (unsigned i = 0; i < dpm->nbp; i++) {
			struct dpm_bp *dbp = dpm->dbp + i;
			struct breakpoint *bp = dbp->bp;

			retval = dpm_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
				bp ? &bp->is_set : NULL);
			if (retval != ERROR_OK)
				goto done;
		}
	}

	/* enable/disable watchpoints */
	for (unsigned i = 0; i < dpm->nwp; i++) {
		struct dpm_wp *dwp = dpm->dwp + i;
		struct watchpoint *wp = dwp->wp;

		retval = dpm_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
			wp ? &wp->is_set : NULL);
		if (retval != ERROR_OK)
			goto done;
	}

	/* NOTE: writes to breakpoint and watchpoint registers might
	 * be queued, and need (efficient/batched) flushing later.
	 */

	/* Scan the registers until we find one that's both dirty and
	 * eligible for flushing.  Flush that and everything else that
	 * shares the same core mode setting.  Typically this won't
	 * actually find anything to do...
	 */
	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_write = false;

		/* check everything except our scratch registers R0 and R1 */
		for (unsigned i = 2; i < cache->num_regs; i++) {
			struct arm_reg *r;
			unsigned regnum;

			/* also skip PC, CPSR, and non-dirty */
			if (i == 15)
				continue;
			if (arm->cpsr == cache->reg_list + i)
				continue;
			if (!cache->reg_list[i].exist || !cache->reg_list[i].dirty)
				continue;

			r = cache->reg_list[i].arch_info;
			regnum = r->num;

			/* may need to pick and set a mode */
			if (!did_write) {
				enum arm_mode tmode;

				did_write = true;
				mode = tmode = r->mode;

				/* cope with special cases */
				switch (regnum) {
				case 8 ... 12:
					/* r8..r12 "anything but FIQ" case;
					 * we "know" core mode is accurate
					 * since we haven't changed it yet
					 */
					if (arm->core_mode == ARM_MODE_FIQ
						&& mode != ARM_MODE_ANY)
						tmode = ARM_MODE_USR;
					break;
				case 16:
					/* SPSR */
					regnum++;
					break;
				}

				/* REVISIT error checks */
				if (tmode != ARM_MODE_ANY) {
					retval = arm_dpm_modeswitch(dpm, tmode);
					if (retval != ERROR_OK)
						goto done;
				}
			}
			if (r->mode != mode)
				continue;

			retval = dpm_write_reg(dpm,
				&cache->reg_list[i],
				regnum);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_write);

	/* Restore original CPSR ... assuming either that we changed it,
	 * or it's dirty.  Must write PC to ensure the return address is
	 * defined, and must not write it before CPSR.
	 */
	retval = arm_dpm_modeswitch(dpm, ARM_MODE_ANY);
	if (retval != ERROR_OK)
		goto done;
	arm->cpsr->dirty = false;

	/* restore the PC, make sure to also switch the core state
	 * to whatever it was set to with "arm core_state" command.
	 * target code will have set PC to an appropriate resume address.
	 */
	retval = dpm_write_pc_core_state(dpm, arm->pc);
	if (retval != ERROR_OK)
		goto done;
	/* on Cortex-A5 (as found on NXP VF610 SoC), BX instruction
	 * executed in debug state doesn't appear to set the PC,
	 * explicitly set it with a "MOV pc, r0". This doesn't influence
	 * CPSR on Cortex-A9 so it should be OK. Maybe due to different
	 * debug version?
	 */
	retval = dpm_write_reg(dpm, arm->pc, 15);
	if (retval != ERROR_OK)
		goto done;
	arm->pc->dirty = false;

	/* flush R0 and R1 (our scratch registers) */
	for (unsigned i = 0; i < 2; i++) {
		retval = dpm_write_reg(dpm, &cache->reg_list[i], i);
		if (retval != ERROR_OK)
			goto done;
		cache->reg_list[i].dirty = false;
	}

done:
	/* (void) */ dpm->finish(dpm);
	return retval;
}
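
/* Flush ordering summary (as implemented above): banked registers are written
 * one mode at a time, then CPSR is restored, then the PC (which also selects
 * the core state), and finally the scratch registers R0/R1.
 */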
/* Returns ARM_MODE_ANY or temporary mode to use while reading the
 * specified register ... works around flakiness from ARM core calls.
 * Caller already filtered out SPSR access; mode is never MODE_SYS
 * or MODE_ANY.
 */
static enum arm_mode dpm_mapmode(struct arm *arm,
	unsigned num, enum arm_mode mode)
{
	enum arm_mode amode = arm->core_mode;

	/* don't switch if the mode is already correct */
	if (amode == ARM_MODE_SYS)
		amode = ARM_MODE_USR;
	if (mode == amode)
		return ARM_MODE_ANY;

	switch (num) {
	/* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
	case 0 ... 7:
	case 15:
	case 16:
		break;
	/* r8..r12 aren't shadowed for anything except FIQ */
	case 8 ... 12:
		if (mode == ARM_MODE_FIQ)
			return mode;
		break;
	/* r13/sp, and r14/lr are always shadowed */
	case 13:
	case 14:
	case ARM_VFP_V3_D0 ... ARM_VFP_V3_FPSCR:
		return mode;
	default:
		LOG_WARNING("invalid register #%u", num);
		break;
	}
	return ARM_MODE_ANY;
}
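
/* Example: with the core halted in SVC mode, asking for r13/sp in IRQ mode
 * maps to ARM_MODE_IRQ here, so the accessors below switch mode around the
 * access; plain r0..r7, the PC, or CPSR never need a temporary switch.
 */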
/*
 * Standard ARM register accessors ... there are three methods
 * in "struct arm", to support individual read/write and bulk read
 * of registers.
 */

static int arm_dpm_read_core_reg(struct target *target, struct reg *r,
	int regnum, enum arm_mode mode)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || (regnum > 16 && regnum < ARM_VFP_V3_D0) ||
		(regnum > ARM_VFP_V3_FPSCR))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to read SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = arm_dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = arm_dpm_read_reg(dpm, r, regnum);
	if (retval != ERROR_OK)
		goto fail;
	/* always clean up, regardless of error */

fail:
	if (mode != ARM_MODE_ANY)
		/* (void) */ arm_dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
static int arm_dpm_write_core_reg(struct target *target, struct reg *r,
	int regnum, enum arm_mode mode, uint8_t *value)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || (regnum > 16 && regnum < ARM_VFP_V3_D0) ||
		(regnum > ARM_VFP_V3_FPSCR))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to write SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = arm_dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_write_reg(dpm, r, regnum);
	/* always clean up, regardless of error */

fail:
	if (mode != ARM_MODE_ANY)
		/* (void) */ arm_dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
static int arm_dpm_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_read;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_read = false;

		/* We "know" arm_dpm_read_current_registers() was called so
		 * the unmapped registers (R0..R7, PC, AND CPSR) and some
		 * view of R8..R14 are current.  We also "know" oddities of
		 * register mapping: special cases for R8..R12 and SPSR.
		 *
		 * Pick some mode with unread registers and read them all.
		 */
		for (unsigned i = 0; i < cache->num_regs; i++) {
			struct arm_reg *r;

			if (!cache->reg_list[i].exist || cache->reg_list[i].valid)
				continue;
			r = cache->reg_list[i].arch_info;

			/* may need to pick a mode and set CPSR */
			if (!did_read) {
				did_read = true;
				mode = r->mode;

				/* For regular (ARM_MODE_ANY) R8..R12
				 * in case we've entered debug state
				 * in FIQ mode we need to patch mode.
				 */
				if (mode != ARM_MODE_ANY)
					retval = arm_dpm_modeswitch(dpm, mode);
				else
					retval = arm_dpm_modeswitch(dpm, ARM_MODE_USR);

				if (retval != ERROR_OK)
					goto done;
			}
			if (r->mode != mode)
				continue;

			/* CPSR was read, so "R16" must mean SPSR */
			retval = arm_dpm_read_reg(dpm,
				&cache->reg_list[i],
				(r->num == 16) ? 17 : r->num);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_read);

	retval = arm_dpm_modeswitch(dpm, ARM_MODE_ANY);
	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}
/*----------------------------------------------------------------------*/

/*
 * Breakpoint and Watchpoint support.
 *
 * Hardware {break,watch}points are usually left active, to minimize
 * debug entry/exit costs.  When they are set or cleared, it's done in
 * batches.  Also, DPM-conformant hardware can update debug registers
 * regardless of whether the CPU is running or halted ... though that
 * fact isn't currently leveraged.
 */
static int dpm_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
	uint32_t addr, uint32_t length)
{
	uint32_t control;

	control = (1 << 0)	/* enable */
		| (3 << 1);	/* both user and privileged access */

	/* Match 1, 2, or all 4 byte addresses in this word.
	 *
	 * FIXME:  v7 hardware allows lengths up to 2 GB for BP and WP.
	 * Support larger length, when addr is suitably aligned.  In
	 * particular, allow watchpoints on 8 byte "double" values.
	 *
	 * REVISIT allow watchpoints on unaligned 2-byte values; and on
	 * v7 hardware, unaligned 4-byte ones too.
	 */
	switch (length) {
	case 1:
		control |= (1 << (addr & 3)) << 5;
		break;
	case 2:
		/* require 2-byte alignment */
		if (!(addr & 1)) {
			control |= (3 << (addr & 2)) << 5;
			break;
		}
	/* FALL THROUGH */
	case 4:
		/* require 4-byte alignment */
		if (!(addr & 3)) {
			control |= 0xf << 5;
			break;
		}
	/* FALL THROUGH */
	default:
		LOG_ERROR("unsupported {break,watch}point length/alignment");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* other shared control bits:
	 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
	 * bit 20 == 0 ... not linked to a context ID
	 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
	 */

	xp->address = addr & ~3;
	xp->control = control;
	xp->dirty = true;

	LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
		xp->address, control, xp->number);

	/* hardware is updated in write_dirty_registers() */
	return ERROR_OK;
}
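
/* Control word layout built above (ARMv7 DBGBCR/DBGWCR style):
 *	bit  0      enable
 *	bits 2:1    privilege control (3 == user and privileged)
 *	bits 4:3    load/store selection, watchpoints only (set below)
 *	bits 8:5    byte address select within the aligned word
 * Everything else stays zero, per the comment above.
 */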
static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (bp->length < 2)
		return ERROR_COMMAND_SYNTAX_ERROR;
	if (!dpm->bpwp_enable)
		return retval;

	/* FIXME we need a generic solution for software breakpoints. */
	if (bp->type == BKPT_SOFT)
		LOG_DEBUG("using HW bkpt, not SW...");

	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (!dpm->dbp[i].bp) {
			retval = dpm_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
				bp->address, bp->length);
			if (retval == ERROR_OK)
				dpm->dbp[i].bp = bp;
			break;
		}
	}

	return retval;
}
static int dpm_remove_breakpoint(struct target *target, struct breakpoint *bp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (dpm->dbp[i].bp == bp) {
			dpm->dbp[i].bp = NULL;
			dpm->dbp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}
static int dpm_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
	struct watchpoint *wp)
{
	int retval;
	struct dpm_wp *dwp = dpm->dwp + index_t;
	uint32_t control;

	/* this hardware doesn't support data value matching or masking */
	if (wp->value || wp->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint values and masking not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	retval = dpm_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
	if (retval != ERROR_OK)
		return retval;

	control = dwp->bpwp.control;
	switch (wp->rw) {
	case WPT_READ:
		control |= 1 << 3;
		break;
	case WPT_WRITE:
		control |= 2 << 3;
		break;
	case WPT_ACCESS:
		control |= 3 << 3;
		break;
	}
	dwp->bpwp.control = control;

	dpm->dwp[index_t].wp = wp;

	return ERROR_OK;
}
static int dpm_add_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (dpm->bpwp_enable) {
		for (unsigned i = 0; i < dpm->nwp; i++) {
			if (!dpm->dwp[i].wp) {
				retval = dpm_watchpoint_setup(dpm, i, wp);
				break;
			}
		}
	}

	return retval;
}
static int dpm_remove_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	for (unsigned i = 0; i < dpm->nwp; i++) {
		if (dpm->dwp[i].wp == wp) {
			dpm->dwp[i].wp = NULL;
			dpm->dwp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}
void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
{
	switch (dpm->arm->core_state) {
	case ARM_STATE_ARM:
		addr -= 8;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		addr -= 4;
		break;
	case ARM_STATE_JAZELLE:
	case ARM_STATE_AARCH64:
		/* ?? */
		break;
	}
	dpm->wp_addr = addr;
}
/*----------------------------------------------------------------------*/

/*
 * Other debug and support utilities
 */

void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
{
	struct target *target = dpm->arm->target;

	dpm->dscr = dscr;

	/* Examine debug reason */
	switch (DSCR_ENTRY(dscr)) {
	case DSCR_ENTRY_HALT_REQ:	/* HALT request from debugger */
	case DSCR_ENTRY_EXT_DBG_REQ:	/* EDBGRQ */
		target->debug_reason = DBG_REASON_DBGRQ;
		break;
	case DSCR_ENTRY_BREAKPOINT:	/* HW breakpoint */
	case DSCR_ENTRY_BKPT_INSTR:	/* vector catch */
		target->debug_reason = DBG_REASON_BREAKPOINT;
		break;
	case DSCR_ENTRY_IMPRECISE_WATCHPT:	/* asynch watchpoint */
	case DSCR_ENTRY_PRECISE_WATCHPT:	/* precise watchpoint */
		target->debug_reason = DBG_REASON_WATCHPOINT;
		break;
	default:
		target->debug_reason = DBG_REASON_UNDEFINED;
		break;
	}
}
/*----------------------------------------------------------------------*/

/*
 * Setup and management support.
 */

/**
 * Hooks up this DPM to its associated target; call only once.
 * Initially this only covers the register cache.
 *
 * Oh, and watchpoints.  Yeah.
 */
int arm_dpm_setup(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	struct target *target = arm->target;
	struct reg_cache *cache = 0;

	arm->dpm = dpm;

	/* register access setup */
	arm->full_context = arm_dpm_full_context;
	arm->read_core_reg = arm_dpm_read_core_reg;
	arm->write_core_reg = arm_dpm_write_core_reg;

	if (!arm->core_cache) {
		cache = arm_build_reg_cache(target, arm);
		if (!cache)
			return ERROR_FAIL;

		*register_get_last_cache_p(&target->reg_cache) = cache;
	}

	/* coprocessor access setup */
	arm->mrc = dpm_mrc;
	arm->mcr = dpm_mcr;

	/* breakpoint setup -- optional until it works everywhere */
	if (!target->type->add_breakpoint) {
		target->type->add_breakpoint = dpm_add_breakpoint;
		target->type->remove_breakpoint = dpm_remove_breakpoint;
	}

	/* watchpoint setup -- optional until it works everywhere */
	if (!target->type->add_watchpoint) {
		target->type->add_watchpoint = dpm_add_watchpoint;
		target->type->remove_watchpoint = dpm_remove_watchpoint;
	}

	/* FIXME add vector catch support */

	dpm->nbp = 1 + ((dpm->didr >> 24) & 0xf);
	dpm->nwp = 1 + ((dpm->didr >> 28) & 0xf);
	dpm->dbp = calloc(dpm->nbp, sizeof(*dpm->dbp));
	dpm->dwp = calloc(dpm->nwp, sizeof(*dpm->dwp));

	if (!dpm->dbp || !dpm->dwp) {
		arm_free_reg_cache(arm);
		free(dpm->dbp);
		free(dpm->dwp);
		return ERROR_FAIL;
	}

	LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
		target_name(target), dpm->nbp, dpm->nwp);

	/* REVISIT ... and some of those breakpoints could match
	 * execution context IDs...
	 */

	return ERROR_OK;
}
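
/* Typical use (sketch, mirroring how the cortex_a style drivers wire this
 * up): the core driver fills in dpm->didr plus the prepare/finish, instr_*,
 * and bpwp_* callbacks, calls arm_dpm_setup() once when the target is
 * examined, and later calls arm_dpm_initialize() at debug-session start.
 */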
/**
 * Reinitializes DPM state at the beginning of a new debug session
 * or after a reset which may have affected the debug module.
 */
int arm_dpm_initialize(struct arm_dpm *dpm)
{
	/* Disable all breakpoints and watchpoints at startup. */
	if (dpm->bpwp_disable) {
		unsigned i;

		for (i = 0; i < dpm->nbp; i++) {
			dpm->dbp[i].bpwp.number = i;
			(void) dpm->bpwp_disable(dpm, i);
		}
		for (i = 0; i < dpm->nwp; i++) {
			dpm->dwp[i].bpwp.number = 16 + i;
			(void) dpm->bpwp_disable(dpm, 16 + i);
		}
	} else
		LOG_WARNING("%s: can't disable breakpoints and watchpoints",
			target_name(dpm->arm->target));

	return ERROR_OK;
}