2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 #include "armv8_dpm.h"
25 #include <jtag/jtag.h>
27 #include "breakpoints.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
34 * Implements various ARM DPM operations using architectural debug registers.
35 * These routines layer over core-specific communication methods to cope with
36 * implementation differences between cores like ARM1136 and Cortex-A8.
38 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
39 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
40 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
41 * are abstracted through internal programming interfaces to share code and
42 * to minimize needless differences in debug behavior between cores.
45 /*----------------------------------------------------------------------*/
51 /* Read coprocessor */
/* Read a coprocessor register: prepare the DPM, execute an MRC into R0
 * on the core, and fetch the result via DCC into the caller's buffer.
 * NOTE(review): this extraction has dropped lines (gaps in the embedded
 * numbering, e.g. 53->56); text below is preserved byte-identical. */
52 static int dpm_mrc(struct target
*target
, int cpnum
,
53 uint32_t op1
, uint32_t op2
, uint32_t CRn
, uint32_t CRm
,
56 struct arm
*arm
= target_to_arm(target
);
57 struct arm_dpm
*dpm
= arm
->dpm
;
/* enter debug-instruction-execution context before issuing MRC */
60 retval
= dpm
->prepare(dpm
);
61 if (retval
!= ERROR_OK
)
64 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum
,
66 (int) CRm
, (int) op2
);
68 /* read coprocessor register into R0; return via DCC */
69 retval
= dpm
->instr_read_data_r0(dpm
,
70 ARMV4_5_MRC(cpnum
, op1
, 0, CRn
, CRm
, op2
),
/* finish() result intentionally discarded; retval already holds status */
73 /* (void) */ dpm
->finish(dpm
);
/* Write a coprocessor register: move the value into R0 via DCC, then
 * execute an MCR from R0 on the core. Mirrors dpm_mrc() above.
 * NOTE(review): lines missing from this extraction; preserved verbatim. */
77 static int dpm_mcr(struct target
*target
, int cpnum
,
78 uint32_t op1
, uint32_t op2
, uint32_t CRn
, uint32_t CRm
,
81 struct arm
*arm
= target_to_arm(target
);
82 struct arm_dpm
*dpm
= arm
->dpm
;
85 retval
= dpm
->prepare(dpm
);
86 if (retval
!= ERROR_OK
)
89 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum
,
91 (int) CRm
, (int) op2
);
93 /* read DCC into r0; then write coprocessor register from R0 */
94 retval
= dpm
->instr_write_data_r0(dpm
,
95 ARMV4_5_MCR(cpnum
, op1
, 0, CRn
, CRm
, op2
),
98 /* (void) */ dpm
->finish(dpm
);
102 /*----------------------------------------------------------------------*/
105 * Register access utilities
108 /* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
109 * Routines *must* restore the original mode before returning!!
/* Switch the core to a temporary mode (or back to the recorded one when
 * mode == ARM_MODE_ANY) by writing CPSR through R0 with an MSR.
 * Callers must always restore the original mode before returning.
 * NOTE(review): the non-ANY branch body is missing from this extraction. */
111 int arm_dpm_modeswitch(struct arm_dpm
*dpm
, enum arm_mode mode
)
116 /* restore previous mode */
117 if (mode
== ARM_MODE_ANY
)
118 cpsr
= buf_get_u32(dpm
->arm
->cpsr
->value
, 0, 32);
120 /* else force to the specified mode */
124 retval
= dpm
->instr_write_data_r0(dpm
, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr
);
125 if (retval
!= ERROR_OK
)
/* some cores need an explicit sync after CPSR is written */
128 if (dpm
->instr_cpsr_sync
)
129 retval
= dpm
->instr_cpsr_sync(dpm
);
134 /* Read 64bit VFP registers */
/* Read a 64-bit VFPv3 double register (D0..D31) into the cached reg:
 * VMOV splits the D register into r0:r1, then both halves come back
 * over DCC. Low word lands at r->value, high word at r->value + 4. */
135 static int dpm_read_reg_u64(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
137 int retval
= ERROR_FAIL
;
138 uint32_t value_r0
, value_r1
;
141 case ARM_VFP_V3_D0
... ARM_VFP_V3_D31
:
142 /* move from double word register to r0:r1: "vmov r0, r1, vm"
143 * then read r0 via dcc
145 retval
= dpm
->instr_read_data_r0(dpm
,
146 ARMV4_5_VMOV(1, 1, 0, ((regnum
- ARM_VFP_V3_D0
) >> 4),
147 ((regnum
- ARM_VFP_V3_D0
) & 0xf)), &value_r0
);
148 if (retval
!= ERROR_OK
)
151 /* read r1 via dcc */
152 retval
= dpm
->instr_read_data_dcc(dpm
,
153 ARMV4_5_MCR(14, 0, 1, 0, 5, 0),
/* update the register cache only when both DCC reads succeeded */
161 if (retval
== ERROR_OK
) {
162 buf_set_u32(r
->value
, 0, 32, value_r0
);
163 buf_set_u32(r
->value
+ 4, 0, 32, value_r1
);
166 LOG_DEBUG("READ: %s, %8.8x, %8.8x", r
->name
,
167 (unsigned) value_r0
, (unsigned) value_r1
);
173 /* just read the register -- rely on the core mode being right */
/* Read one register in the CURRENT core mode: r0..r15 via DCC, the PC
 * via "MOV r0, pc" (with a state-dependent pipeline adjustment),
 * VFPv3 D-registers via dpm_read_reg_u64(), FPSCR via VMRS, and
 * CPSR/SPSR via MRS. On success the cached value and flags are updated.
 * NOTE(review): switch headers and several case bodies are missing from
 * this extraction; code preserved byte-identical. */
174 int arm_dpm_read_reg(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
181 /* return via DCC: "MCR p14, 0, Rnum, c0, c5, 0" */
182 retval
= dpm
->instr_read_data_dcc(dpm
,
183 ARMV4_5_MCR(14, 0, regnum
, 0, 5, 0),
187 * "MOV r0, pc"; then return via DCC */
188 retval
= dpm
->instr_read_data_r0(dpm
, 0xe1a0000f, &value
);
190 /* NOTE: this seems like a slightly awkward place to update
191 * this value ... but if the PC gets written (the only way
192 * to change what we compute), the arch spec says subsequent
193 * reads return values which are "unpredictable". So this
194 * is always right except in those broken-by-intent cases.
/* PC read-back offset depends on instruction-set state */
196 switch (dpm
->arm
->core_state
) {
200 case ARM_STATE_THUMB
:
201 case ARM_STATE_THUMB_EE
:
204 case ARM_STATE_JAZELLE
:
205 /* core-specific ... ? */
206 LOG_WARNING("Jazelle PC adjustment unknown");
209 LOG_WARNING("unknown core state");
213 case ARM_VFP_V3_D0
... ARM_VFP_V3_D31
:
214 return dpm_read_reg_u64(dpm
, r
, regnum
);
215 case ARM_VFP_V3_FPSCR
:
216 /* "VMRS r0, FPSCR"; then return via DCC */
217 retval
= dpm
->instr_read_data_r0(dpm
,
218 ARMV4_5_VMRS(0), &value
);
221 /* 16: "MRS r0, CPSR"; then return via DCC
222 * 17: "MRS r0, SPSR"; then return via DCC
224 retval
= dpm
->instr_read_data_r0(dpm
,
225 ARMV4_5_MRS(0, regnum
& 1),
230 if (retval
== ERROR_OK
) {
231 buf_set_u32(r
->value
, 0, 32, value
);
234 LOG_DEBUG("READ: %s, %8.8x", r
->name
, (unsigned) value
);
240 /* Write 64bit VFP registers */
/* Write a 64-bit VFPv3 double register (D0..D31) from the cached reg:
 * push the high word into r1 over DCC, the low word into r0, then
 * "vmov vm, r0, r1" assembles them into the D register. */
241 static int dpm_write_reg_u64(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
243 int retval
= ERROR_FAIL
;
244 uint32_t value_r0
= buf_get_u32(r
->value
, 0, 32);
245 uint32_t value_r1
= buf_get_u32(r
->value
+ 4, 0, 32);
248 case ARM_VFP_V3_D0
... ARM_VFP_V3_D31
:
249 /* write value_r1 to r1 via dcc */
250 retval
= dpm
->instr_write_data_dcc(dpm
,
251 ARMV4_5_MRC(14, 0, 1, 0, 5, 0),
253 if (retval
!= ERROR_OK
)
256 /* write value_r0 to r0 via dcc then,
257 * move to double word register from r0:r1: "vmov vm, r0, r1"
259 retval
= dpm
->instr_write_data_r0(dpm
,
260 ARMV4_5_VMOV(0, 1, 0, ((regnum
- ARM_VFP_V3_D0
) >> 4),
261 ((regnum
- ARM_VFP_V3_D0
) & 0xf)), value_r0
);
268 if (retval
== ERROR_OK
) {
270 LOG_DEBUG("WRITE: %s, %8.8x, %8.8x", r
->name
,
271 (unsigned) value_r0
, (unsigned) value_r1
);
277 /* just write the register -- rely on the core mode being right */
/* Write one register in the CURRENT core mode: r0..r14 straight from
 * DCC, PC via "MOV pc, r0", VFPv3 D-regs via dpm_write_reg_u64(),
 * FPSCR via VMSR, and CPSR/SPSR via MSR (with optional post-CPSR sync).
 * NOTE(review): switch header and case labels for the early branches are
 * missing from this extraction; code preserved byte-identical. */
278 static int dpm_write_reg(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
281 uint32_t value
= buf_get_u32(r
->value
, 0, 32);
285 /* load register from DCC: "MRC p14, 0, Rnum, c0, c5, 0" */
286 retval
= dpm
->instr_write_data_dcc(dpm
,
287 ARMV4_5_MRC(14, 0, regnum
, 0, 5, 0),
291 * read r0 from DCC; then "MOV pc, r0" */
292 retval
= dpm
->instr_write_data_r0(dpm
, 0xe1a0f000, value
);
294 case ARM_VFP_V3_D0
... ARM_VFP_V3_D31
:
295 return dpm_write_reg_u64(dpm
, r
, regnum
);
296 case ARM_VFP_V3_FPSCR
:
297 /* move to r0 from DCC, then "VMSR FPSCR, r0" */
298 retval
= dpm
->instr_write_data_r0(dpm
,
299 ARMV4_5_VMSR(0), value
);
302 /* 16: read r0 from DCC, then "MSR r0, CPSR_cxsf"
303 * 17: read r0 from DCC, then "MSR r0, SPSR_cxsf"
305 retval
= dpm
->instr_write_data_r0(dpm
,
306 ARMV4_5_MSR_GP(0, 0xf, regnum
& 1),
308 if (retval
!= ERROR_OK
)
/* only writes to the live CPSR (regnum 16) need the extra sync */
311 if (regnum
== 16 && dpm
->instr_cpsr_sync
)
312 retval
= dpm
->instr_cpsr_sync(dpm
);
317 if (retval
== ERROR_OK
) {
319 LOG_DEBUG("WRITE: %s, %8.8x", r
->name
, (unsigned) value
);
326 * Write to program counter and switch the core state (arm/thumb) according to
/* Write the PC with "BX r0" so the core's ARM/Thumb state follows the
 * address's low bit, instead of a plain "MOV pc, r0". */
329 static int dpm_write_pc_core_state(struct arm_dpm
*dpm
, struct reg
*r
)
331 uint32_t value
= buf_get_u32(r
->value
, 0, 32);
333 /* read r0 from DCC; then "BX r0" */
334 return dpm
->instr_write_data_r0(dpm
, ARMV4_5_BX(0), value
);
338 * Read basic registers of the current context: R0 to R15, and CPSR;
339 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
340 * In normal operation this is called on entry to halting debug state,
341 * possibly after some other operations supporting restore of debug state
342 * or making sure the CPU is fully idle (drain write buffer, etc).
/* On debug entry: read R0/R1 (the scratch registers) first, then CPSR
 * (via MRS through r0), record mode/state with arm_set_cpsr(), and
 * finally read R2..R14 plus PC for the now-known current mode. */
344 int arm_dpm_read_current_registers(struct arm_dpm
*dpm
)
346 struct arm
*arm
= dpm
->arm
;
351 retval
= dpm
->prepare(dpm
);
352 if (retval
!= ERROR_OK
)
355 /* read R0 and R1 first (it's used for scratch), then CPSR */
356 for (unsigned i
= 0; i
< 2; i
++) {
357 r
= arm
->core_cache
->reg_list
+ i
;
359 retval
= arm_dpm_read_reg(dpm
, r
, i
);
360 if (retval
!= ERROR_OK
)
/* "MRS r0, CPSR" -> DCC; safe now that R0 is cached */
366 retval
= dpm
->instr_read_data_r0(dpm
, ARMV4_5_MRS(0, 0), &cpsr
);
367 if (retval
!= ERROR_OK
)
370 /* update core mode and state, plus shadow mapping for R8..R14 */
371 arm_set_cpsr(arm
, cpsr
);
373 /* REVISIT we can probably avoid reading R1..R14, saving time... */
374 for (unsigned i
= 2; i
< 16; i
++) {
375 r
= arm_reg_current(arm
, i
);
379 retval
= arm_dpm_read_reg(dpm
, r
, i
);
380 if (retval
!= ERROR_OK
)
384 /* NOTE: SPSR ignored (if it's even relevant). */
386 /* REVISIT the debugger can trigger various exceptions. See the
387 * ARMv7A architecture spec, section C5.7, for more info about
388 * what defenses are needed; v6 debug has the most issues.
392 /* (void) */ dpm
->finish(dpm
);
396 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
397 * unless they're removed, or need updating because of single-stepping
398 * or running debugger code.
/* Lazily sync one hardware break/watchpoint unit with its desired
 * state: disable when removed (or at startup), enable when newly set,
 * and temporarily disable when the debugger itself needs to run.
 * bpwp selects enable-vs-disable policy; set_p is NULL when removed.
 * NOTE(review): the decision branches between the visible fragments are
 * missing from this extraction; code preserved byte-identical.
 * NOTE(review): the LOG_ERROR below passes 'disable ? ...' before
 * target_name(); format "%s: can't %s ..." reads name-first -- the
 * argument order looks swapped, verify against upstream. */
400 static int dpm_maybe_update_bpwp(struct arm_dpm
*dpm
, bool bpwp
,
401 struct dpm_bpwp
*xp
, int *set_p
)
403 int retval
= ERROR_OK
;
410 /* removed or startup; we must disable it */
415 /* disabled, but we must set it */
416 xp
->dirty
= disable
= false;
421 /* set, but we must temporarily disable it */
422 xp
->dirty
= disable
= true;
427 retval
= dpm
->bpwp_disable(dpm
, xp
->number
);
429 retval
= dpm
->bpwp_enable(dpm
, xp
->number
,
430 xp
->address
, xp
->control
);
432 if (retval
!= ERROR_OK
)
433 LOG_ERROR("%s: can't %s HW %spoint %d",
434 disable
? "disable" : "enable",
435 target_name(dpm
->arm
->target
),
436 (xp
->number
< 16) ? "break" : "watch",
/* Forward declaration: arm_dpm_write_dirty_registers() compares the
 * target's add_breakpoint hook against this to detect DPM-managed
 * breakpoints; definition appears later in this file. */
442 static int dpm_add_breakpoint(struct target
*target
, struct breakpoint
*bp
);
445 * Writes all modified core registers for all processor modes. In normal
446 * operation this is called on exit from halting debug state.
448 * @param dpm: represents the processor
449 * @param bpwp: true ensures breakpoints and watchpoints are set,
450 * false ensures they are cleared
/* On debug exit: push every dirty register back to the core. Order
 * matters -- banked registers first (mode-switching as needed), then
 * CPSR restored, then PC (which fixes the resume address and core
 * state), and finally the R0/R1 scratch registers. Also syncs HW
 * break/watchpoints when this DPM manages them (bpwp selects
 * enable vs. disable).
 * NOTE(review): loop-exit/error paths and several statements are
 * missing from this extraction; code preserved byte-identical. */
452 int arm_dpm_write_dirty_registers(struct arm_dpm
*dpm
, bool bpwp
)
454 struct arm
*arm
= dpm
->arm
;
455 struct reg_cache
*cache
= arm
->core_cache
;
459 retval
= dpm
->prepare(dpm
);
460 if (retval
!= ERROR_OK
)
463 /* If we're managing hardware breakpoints for this core, enable
464 * or disable them as requested.
466 * REVISIT We don't yet manage them for ANY cores. Eventually
467 * we should be able to assume we handle them; but until then,
468 * cope with the hand-crafted breakpoint code.
470 if (arm
->target
->type
->add_breakpoint
== dpm_add_breakpoint
) {
471 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
472 struct dpm_bp
*dbp
= dpm
->dbp
+ i
;
473 struct breakpoint
*bp
= dbp
->bp
;
475 retval
= dpm_maybe_update_bpwp(dpm
, bpwp
, &dbp
->bpwp
,
476 bp
? &bp
->set
: NULL
);
477 if (retval
!= ERROR_OK
)
482 /* enable/disable watchpoints */
483 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
484 struct dpm_wp
*dwp
= dpm
->dwp
+ i
;
485 struct watchpoint
*wp
= dwp
->wp
;
487 retval
= dpm_maybe_update_bpwp(dpm
, bpwp
, &dwp
->bpwp
,
488 wp
? &wp
->set
: NULL
);
489 if (retval
!= ERROR_OK
)
493 /* NOTE: writes to breakpoint and watchpoint registers might
494 * be queued, and need (efficient/batched) flushing later.
497 /* Scan the registers until we find one that's both dirty and
498 * eligible for flushing. Flush that and everything else that
499 * shares the same core mode setting. Typically this won't
500 * actually find anything to do...
503 enum arm_mode mode
= ARM_MODE_ANY
;
507 /* check everything except our scratch registers R0 and R1 */
508 for (unsigned i
= 2; i
< cache
->num_regs
; i
++) {
512 /* also skip PC, CPSR, and non-dirty */
515 if (arm
->cpsr
== cache
->reg_list
+ i
)
517 if (!cache
->reg_list
[i
].exist
|| !cache
->reg_list
[i
].dirty
)
520 r
= cache
->reg_list
[i
].arch_info
;
523 /* may need to pick and set a mode */
528 mode
= tmode
= r
->mode
;
530 /* cope with special cases */
533 /* r8..r12 "anything but FIQ" case;
534 * we "know" core mode is accurate
535 * since we haven't changed it yet
537 if (arm
->core_mode
== ARM_MODE_FIQ
540 tmode
= ARM_MODE_USR
;
548 /* REVISIT error checks */
549 if (tmode
!= ARM_MODE_ANY
) {
550 retval
= arm_dpm_modeswitch(dpm
, tmode
);
551 if (retval
!= ERROR_OK
)
558 retval
= dpm_write_reg(dpm
,
561 if (retval
!= ERROR_OK
)
567 /* Restore original CPSR ... assuming either that we changed it,
568 * or it's dirty. Must write PC to ensure the return address is
569 * defined, and must not write it before CPSR.
571 retval
= arm_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
572 if (retval
!= ERROR_OK
)
574 arm
->cpsr
->dirty
= false;
576 /* restore the PC, make sure to also switch the core state
577 * to whatever it was set to with "arm core_state" command.
578 * target code will have set PC to an appropriate resume address.
580 retval
= dpm_write_pc_core_state(dpm
, arm
->pc
);
581 if (retval
!= ERROR_OK
)
583 /* on Cortex-A5 (as found on NXP VF610 SoC), BX instruction
584 * executed in debug state doesn't appear to set the PC,
585 * explicitly set it with a "MOV pc, r0". This doesn't influence
586 * CPSR on Cortex-A9 so it should be OK. Maybe due to different
589 retval
= dpm_write_reg(dpm
, arm
->pc
, 15);
590 if (retval
!= ERROR_OK
)
592 arm
->pc
->dirty
= false;
594 /* flush R0 and R1 (our scratch registers) */
595 for (unsigned i
= 0; i
< 2; i
++) {
596 retval
= dpm_write_reg(dpm
, &cache
->reg_list
[i
], i
);
597 if (retval
!= ERROR_OK
)
599 cache
->reg_list
[i
].dirty
= false;
602 /* (void) */ dpm
->finish(dpm
);
607 /* Returns ARM_MODE_ANY or temporary mode to use while reading the
608 * specified register ... works around flakiness from ARM core calls.
609 * Caller already filtered out SPSR access; mode is never MODE_SYS
/* Map (register number, requested mode) to the temporary core mode
 * needed to reach that register, or ARM_MODE_ANY when no switch is
 * required: r0..r7/pc/cpsr are unbanked, r8..r12 are banked only for
 * FIQ, r13/r14 are always banked, VFP regs need no switch.
 * NOTE(review): the switch header and several return statements are
 * missing from this extraction; code preserved byte-identical. */
612 static enum arm_mode
dpm_mapmode(struct arm
*arm
,
613 unsigned num
, enum arm_mode mode
)
615 enum arm_mode amode
= arm
->core_mode
;
617 /* don't switch if the mode is already correct */
/* SYS shares the USR register bank */
618 if (amode
== ARM_MODE_SYS
)
619 amode
= ARM_MODE_USR
;
624 /* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
629 /* r8..r12 aren't shadowed for anything except FIQ */
631 if (mode
== ARM_MODE_FIQ
)
634 /* r13/sp, and r14/lr are always shadowed */
637 case ARM_VFP_V3_D0
... ARM_VFP_V3_FPSCR
:
640 LOG_WARNING("invalid register #%u", num
);
648 * Standard ARM register accessors ... there are three methods
649 * in "struct arm", to support individual read/write and bulk read
/* struct arm .read_core_reg hook: validate regnum, map to a temporary
 * mode via dpm_mapmode() when needed, switch, read through
 * arm_dpm_read_reg(), then always switch back and finish(). */
653 static int arm_dpm_read_core_reg(struct target
*target
, struct reg
*r
,
654 int regnum
, enum arm_mode mode
)
656 struct arm_dpm
*dpm
= target_to_arm(target
)->dpm
;
/* valid regnums: 0..16, or the VFPv3 D0..FPSCR block */
659 if (regnum
< 0 || (regnum
> 16 && regnum
< ARM_VFP_V3_D0
) ||
660 (regnum
> ARM_VFP_V3_FPSCR
))
661 return ERROR_COMMAND_SYNTAX_ERROR
;
664 if (mode
!= ARM_MODE_ANY
)
667 mode
= dpm_mapmode(dpm
->arm
, regnum
, mode
);
669 /* REVISIT what happens if we try to read SPSR in a core mode
670 * which has no such register?
673 retval
= dpm
->prepare(dpm
);
674 if (retval
!= ERROR_OK
)
677 if (mode
!= ARM_MODE_ANY
) {
678 retval
= arm_dpm_modeswitch(dpm
, mode
);
679 if (retval
!= ERROR_OK
)
683 retval
= arm_dpm_read_reg(dpm
, r
, regnum
);
684 if (retval
!= ERROR_OK
)
686 /* always clean up, regardless of error */
688 if (mode
!= ARM_MODE_ANY
)
689 /* (void) */ arm_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
692 /* (void) */ dpm
->finish(dpm
);
/* struct arm .write_core_reg hook: same shape as arm_dpm_read_core_reg
 * above, but flushes the cached value out via dpm_write_reg().
 * NOTE(review): the 'value' parameter is not referenced in the visible
 * fragments -- presumably consumed in lines this extraction dropped. */
696 static int arm_dpm_write_core_reg(struct target
*target
, struct reg
*r
,
697 int regnum
, enum arm_mode mode
, uint8_t *value
)
699 struct arm_dpm
*dpm
= target_to_arm(target
)->dpm
;
703 if (regnum
< 0 || (regnum
> 16 && regnum
< ARM_VFP_V3_D0
) ||
704 (regnum
> ARM_VFP_V3_FPSCR
))
705 return ERROR_COMMAND_SYNTAX_ERROR
;
708 if (mode
!= ARM_MODE_ANY
)
711 mode
= dpm_mapmode(dpm
->arm
, regnum
, mode
);
713 /* REVISIT what happens if we try to write SPSR in a core mode
714 * which has no such register?
717 retval
= dpm
->prepare(dpm
);
718 if (retval
!= ERROR_OK
)
721 if (mode
!= ARM_MODE_ANY
) {
722 retval
= arm_dpm_modeswitch(dpm
, mode
);
723 if (retval
!= ERROR_OK
)
727 retval
= dpm_write_reg(dpm
, r
, regnum
);
728 /* always clean up, regardless of error */
730 if (mode
!= ARM_MODE_ANY
)
731 /* (void) */ arm_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
734 /* (void) */ dpm
->finish(dpm
);
/* struct arm .full_context hook: after the current-mode registers are
 * already cached, sweep the whole register cache and read every
 * still-invalid banked register, switching modes as needed; register
 * number 16 is remapped to 17 (SPSR) since CPSR is already read. */
738 static int arm_dpm_full_context(struct target
*target
)
740 struct arm
*arm
= target_to_arm(target
);
741 struct arm_dpm
*dpm
= arm
->dpm
;
742 struct reg_cache
*cache
= arm
->core_cache
;
746 retval
= dpm
->prepare(dpm
);
747 if (retval
!= ERROR_OK
)
751 enum arm_mode mode
= ARM_MODE_ANY
;
755 /* We "know" arm_dpm_read_current_registers() was called so
756 * the unmapped registers (R0..R7, PC, AND CPSR) and some
757 * view of R8..R14 are current. We also "know" oddities of
758 * register mapping: special cases for R8..R12 and SPSR.
760 * Pick some mode with unread registers and read them all.
763 for (unsigned i
= 0; i
< cache
->num_regs
; i
++) {
766 if (!cache
->reg_list
[i
].exist
|| cache
->reg_list
[i
].valid
)
768 r
= cache
->reg_list
[i
].arch_info
;
770 /* may need to pick a mode and set CPSR */
775 /* For regular (ARM_MODE_ANY) R8..R12
776 * in case we've entered debug state
777 * in FIQ mode we need to patch mode.
779 if (mode
!= ARM_MODE_ANY
)
780 retval
= arm_dpm_modeswitch(dpm
, mode
);
782 retval
= arm_dpm_modeswitch(dpm
, ARM_MODE_USR
);
784 if (retval
!= ERROR_OK
)
790 /* CPSR was read, so "R16" must mean SPSR */
791 retval
= arm_dpm_read_reg(dpm
,
793 (r
->num
== 16) ? 17 : r
->num
);
794 if (retval
!= ERROR_OK
)
/* restore original mode before leaving */
800 retval
= arm_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
801 /* (void) */ dpm
->finish(dpm
);
807 /*----------------------------------------------------------------------*/
810 * Breakpoint and Watchpoint support.
812 * Hardware {break,watch}points are usually left active, to minimize
813 * debug entry/exit costs. When they are set or cleared, it's done in
814 * batches. Also, DPM-conformant hardware can update debug registers
815 * regardless of whether the CPU is running or halted ... though that
816 * fact isn't currently leveraged.
/* Compute the shared address/control register values for one hardware
 * break/watchpoint unit: enable bit, user+privileged access, and a
 * byte-address-select mask (bits 8:5) derived from length/alignment.
 * Only 1-, 2-, and 4-byte units inside one aligned word are supported.
 * Hardware registers are NOT written here -- that happens later in
 * arm_dpm_write_dirty_registers(). */
819 static int dpm_bpwp_setup(struct arm_dpm
*dpm
, struct dpm_bpwp
*xp
,
820 uint32_t addr
, uint32_t length
)
824 control
= (1 << 0) /* enable */
825 | (3 << 1); /* both user and privileged access */
827 /* Match 1, 2, or all 4 byte addresses in this word.
829 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
830 * Support larger length, when addr is suitably aligned. In
831 * particular, allow watchpoints on 8 byte "double" values.
833 * REVISIT allow watchpoints on unaligned 2-bit values; and on
834 * v7 hardware, unaligned 4-byte ones too.
838 control
|= (1 << (addr
& 3)) << 5;
841 /* require 2-byte alignment */
843 control
|= (3 << (addr
& 2)) << 5;
848 /* require 4-byte alignment */
855 LOG_ERROR("unsupported {break,watch}point length/alignment");
856 return ERROR_COMMAND_SYNTAX_ERROR
;
859 /* other shared control bits:
860 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
861 * bit 20 == 0 ... not linked to a context ID
862 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
865 xp
->address
= addr
& ~3;
866 xp
->control
= control
;
869 LOG_DEBUG("BPWP: addr %8.8" PRIx32
", control %" PRIx32
", number %d",
870 xp
->address
, control
, xp
->number
);
872 /* hardware is updated in write_dirty_registers() */
/* target_type .add_breakpoint hook: claim the first free hardware
 * breakpoint slot and stage its address/control via dpm_bpwp_setup().
 * Software breakpoints are not implemented here (HW is used instead).
 * NOTE(review): the slot-claiming statement after dpm_bpwp_setup()
 * succeeds is missing from this extraction. */
876 static int dpm_add_breakpoint(struct target
*target
, struct breakpoint
*bp
)
878 struct arm
*arm
= target_to_arm(target
);
879 struct arm_dpm
*dpm
= arm
->dpm
;
880 int retval
= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
883 return ERROR_COMMAND_SYNTAX_ERROR
;
884 if (!dpm
->bpwp_enable
)
887 /* FIXME we need a generic solution for software breakpoints. */
888 if (bp
->type
== BKPT_SOFT
)
889 LOG_DEBUG("using HW bkpt, not SW...");
891 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
892 if (!dpm
->dbp
[i
].bp
) {
893 retval
= dpm_bpwp_setup(dpm
, &dpm
->dbp
[i
].bpwp
,
894 bp
->address
, bp
->length
);
895 if (retval
== ERROR_OK
)
/* target_type .remove_breakpoint hook: find the slot holding this
 * breakpoint, clear it, and mark the unit dirty so the hardware gets
 * disabled on the next write_dirty_registers() pass. */
904 static int dpm_remove_breakpoint(struct target
*target
, struct breakpoint
*bp
)
906 struct arm
*arm
= target_to_arm(target
);
907 struct arm_dpm
*dpm
= arm
->dpm
;
/* stays ERROR_COMMAND_SYNTAX_ERROR if bp isn't found in any slot */
908 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
910 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
911 if (dpm
->dbp
[i
].bp
== bp
) {
912 dpm
->dbp
[i
].bp
= NULL
;
913 dpm
->dbp
[i
].bpwp
.dirty
= true;
915 /* hardware is updated in write_dirty_registers() */
/* Stage watchpoint index_t for 'wp': reject data-value/mask matching
 * (unsupported by this hardware), build the shared address/control
 * bits via dpm_bpwp_setup(), then record wp in the slot.
 * NOTE(review): the read/write/access-direction control bits set
 * between lines 941 and 953 are missing from this extraction. */
924 static int dpm_watchpoint_setup(struct arm_dpm
*dpm
, unsigned index_t
,
925 struct watchpoint
*wp
)
928 struct dpm_wp
*dwp
= dpm
->dwp
+ index_t
;
931 /* this hardware doesn't support data value matching or masking */
932 if (wp
->value
|| wp
->mask
!= ~(uint32_t)0) {
933 LOG_DEBUG("watchpoint values and masking not supported");
934 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
937 retval
= dpm_bpwp_setup(dpm
, &dwp
->bpwp
, wp
->address
, wp
->length
);
938 if (retval
!= ERROR_OK
)
941 control
= dwp
->bpwp
.control
;
953 dwp
->bpwp
.control
= control
;
955 dpm
->dwp
[index_t
].wp
= wp
;
/* target_type .add_watchpoint hook: claim the first free hardware
 * watchpoint slot and stage it via dpm_watchpoint_setup(); fails with
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no slot is free or the DPM
 * has no bpwp_enable implementation. */
960 static int dpm_add_watchpoint(struct target
*target
, struct watchpoint
*wp
)
962 struct arm
*arm
= target_to_arm(target
);
963 struct arm_dpm
*dpm
= arm
->dpm
;
964 int retval
= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
966 if (dpm
->bpwp_enable
) {
967 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
968 if (!dpm
->dwp
[i
].wp
) {
969 retval
= dpm_watchpoint_setup(dpm
, i
, wp
);
/* target_type .remove_watchpoint hook: mirror of dpm_remove_breakpoint
 * -- clear the slot and mark the unit dirty for lazy hardware update. */
978 static int dpm_remove_watchpoint(struct target
*target
, struct watchpoint
*wp
)
980 struct arm
*arm
= target_to_arm(target
);
981 struct arm_dpm
*dpm
= arm
->dpm
;
/* stays ERROR_COMMAND_SYNTAX_ERROR if wp isn't found in any slot */
982 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
984 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
985 if (dpm
->dwp
[i
].wp
== wp
) {
986 dpm
->dwp
[i
].wp
= NULL
;
987 dpm
->dwp
[i
].bpwp
.dirty
= true;
989 /* hardware is updated in write_dirty_registers() */
/* Record the watchpoint fault address reported by the core, adjusting
 * for the state-dependent WFAR offset before storing in dpm->wp_addr.
 * NOTE(review): the per-state adjustment statements are missing from
 * this extraction; only the case labels survive. */
998 void arm_dpm_report_wfar(struct arm_dpm
*dpm
, uint32_t addr
)
1000 switch (dpm
->arm
->core_state
) {
1004 case ARM_STATE_THUMB
:
1005 case ARM_STATE_THUMB_EE
:
1008 case ARM_STATE_JAZELLE
:
1009 case ARM_STATE_AARCH64
:
1013 dpm
->wp_addr
= addr
;
1016 /*----------------------------------------------------------------------*/
1019 * Other debug and support utilities
/* Translate the DSCR "method of entry" field into the generic
 * target->debug_reason: DBGRQ for halt/EDBGRQ, BREAKPOINT for HW
 * breakpoint or vector catch, WATCHPOINT for either watchpoint kind,
 * UNDEFINED otherwise. */
1022 void arm_dpm_report_dscr(struct arm_dpm
*dpm
, uint32_t dscr
)
1024 struct target
*target
= dpm
->arm
->target
;
1028 /* Examine debug reason */
1029 switch (DSCR_ENTRY(dscr
)) {
1030 case DSCR_ENTRY_HALT_REQ
: /* HALT request from debugger */
1031 case DSCR_ENTRY_EXT_DBG_REQ
: /* EDBGRQ */
1032 target
->debug_reason
= DBG_REASON_DBGRQ
;
1034 case DSCR_ENTRY_BREAKPOINT
: /* HW breakpoint */
1035 case DSCR_ENTRY_BKPT_INSTR
: /* vector catch */
1036 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1038 case DSCR_ENTRY_IMPRECISE_WATCHPT
: /* asynch watchpoint */
1039 case DSCR_ENTRY_PRECISE_WATCHPT
:/* precise watchpoint */
1040 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
1043 target
->debug_reason
= DBG_REASON_UNDEFINED
;
1048 /*----------------------------------------------------------------------*/
1051 * Setup and management support.
1055 * Hooks up this DPM to its associated target; call only once.
1056 * Initially this only covers the register cache.
1058 * Oh, and watchpoints. Yeah.
/* One-time hookup of this DPM to its target: install the register
 * accessor hooks, build the register cache if absent, install the
 * break/watchpoint handlers (only where the target type hasn't
 * provided its own), size nbp/nwp from DIDR[27:24]/[31:28], and
 * allocate the slot arrays (freeing the cache on OOM). */
1060 int arm_dpm_setup(struct arm_dpm
*dpm
)
1062 struct arm
*arm
= dpm
->arm
;
1063 struct target
*target
= arm
->target
;
1064 struct reg_cache
*cache
= 0;
1068 /* register access setup */
1069 arm
->full_context
= arm_dpm_full_context
;
1070 arm
->read_core_reg
= arm_dpm_read_core_reg
;
1071 arm
->write_core_reg
= arm_dpm_write_core_reg
;
1073 if (arm
->core_cache
== NULL
) {
1074 cache
= arm_build_reg_cache(target
, arm
);
/* append the new cache to the target's cache chain */
1078 *register_get_last_cache_p(&target
->reg_cache
) = cache
;
1081 /* coprocessor access setup */
1085 /* breakpoint setup -- optional until it works everywhere */
1086 if (!target
->type
->add_breakpoint
) {
1087 target
->type
->add_breakpoint
= dpm_add_breakpoint
;
1088 target
->type
->remove_breakpoint
= dpm_remove_breakpoint
;
1091 /* watchpoint setup -- optional until it works everywhere */
1092 if (!target
->type
->add_watchpoint
) {
1093 target
->type
->add_watchpoint
= dpm_add_watchpoint
;
1094 target
->type
->remove_watchpoint
= dpm_remove_watchpoint
;
1097 /* FIXME add vector catch support */
/* DIDR encodes (count - 1) for both breakpoints and watchpoints */
1099 dpm
->nbp
= 1 + ((dpm
->didr
>> 24) & 0xf);
1100 dpm
->nwp
= 1 + ((dpm
->didr
>> 28) & 0xf);
1101 dpm
->dbp
= calloc(dpm
->nbp
, sizeof(*dpm
->dbp
));
1102 dpm
->dwp
= calloc(dpm
->nwp
, sizeof(*dpm
->dwp
));
1104 if (!dpm
->dbp
|| !dpm
->dwp
) {
1105 arm_free_reg_cache(arm
);
1111 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1112 target_name(target
), dpm
->nbp
, dpm
->nwp
);
1114 /* REVISIT ... and some of those breakpoints could match
1115 * execution context IDs...
1122 * Reinitializes DPM state at the beginning of a new debug session
1123 * or after a reset which may have affected the debug module.
/* (Re)initialize DPM state at debug-session start or after reset:
 * number each break/watchpoint unit (watchpoints offset by 16, matching
 * the number-based break/watch split used elsewhere) and best-effort
 * disable all of them; warn if the DPM has no bpwp_disable hook. */
1125 int arm_dpm_initialize(struct arm_dpm
*dpm
)
1127 /* Disable all breakpoints and watchpoints at startup. */
1128 if (dpm
->bpwp_disable
) {
1131 for (i
= 0; i
< dpm
->nbp
; i
++) {
1132 dpm
->dbp
[i
].bpwp
.number
= i
;
1133 (void) dpm
->bpwp_disable(dpm
, i
);
1135 for (i
= 0; i
< dpm
->nwp
; i
++) {
1136 dpm
->dwp
[i
].bpwp
.number
= 16 + i
;
1137 (void) dpm
->bpwp_disable(dpm
, 16 + i
);
1140 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1141 target_name(dpm
->arm
->target
));