/*
 * Copyright (C) 2009 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "arm_dpm.h"
#include "armv8_dpm.h"
#include <jtag/jtag.h>
#include "register.h"
#include "breakpoints.h"
#include "target_type.h"
#include "arm_opcodes.h"

/**
 * @file
 * Implements various ARM DPM operations using architectural debug registers.
 * These routines layer over core-specific communication methods to cope with
 * implementation differences between cores like ARM1136 and Cortex-A8.
 *
 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B).  In OpenOCD, DPM operations
 * are abstracted through internal programming interfaces to share code and
 * to minimize needless differences in debug behavior between cores.
 */

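/*
 * Illustrative sketch only (not part of this file): a core-specific driver
 * is expected to fill in the struct arm_dpm operation hooks used below
 * before calling arm_dpm_setup().  The cortex_example_* names are
 * hypothetical placeholders for the driver's own functions.
 */
#if 0
static void example_dpm_hookup(struct arm_dpm *dpm, struct arm *arm,
	uint32_t didr)
{
	dpm->arm = arm;
	dpm->didr = didr;

	dpm->prepare = cortex_example_dpm_prepare;
	dpm->finish = cortex_example_dpm_finish;

	dpm->instr_write_data_dcc = cortex_example_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_example_instr_write_data_r0;
	dpm->instr_read_data_dcc = cortex_example_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_example_instr_read_data_r0;

	dpm->bpwp_enable = cortex_example_bpwp_enable;
	dpm->bpwp_disable = cortex_example_bpwp_disable;
}
#endif
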
/*----------------------------------------------------------------------*/

/*
 * Coprocessor support
 */

/* Read coprocessor */
static int dpm_mrc(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	uint32_t *value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);

	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
		ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
		value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int dpm_mcr(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	uint32_t value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);

	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
		ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
		value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

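/*
 * Illustrative sketch only (assumption, not part of this file): once
 * arm_dpm_setup() has pointed arm->mrc / arm->mcr at the helpers above,
 * higher layers can access coprocessor registers generically.  The example
 * reads CP15 MIDR ("MRC p15, 0, <Rt>, c0, c0, 0") on a halted target.
 */
#if 0
static int example_read_midr(struct target *target, uint32_t *midr)
{
	struct arm *arm = target_to_arm(target);

	/* cpnum=15, op1=0, op2=0, CRn=0, CRm=0 */
	return arm->mrc(target, 15, 0, 0, 0, 0, midr);
}
#endif
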
/*----------------------------------------------------------------------*/

/*
 * Register access utilities
 */

/* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
 * Routines *must* restore the original mode before returning!!
 */
int arm_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
{
	int retval;
	uint32_t cpsr;

	/* restore previous mode */
	if (mode == ARM_MODE_ANY)
		cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);

	/* else force to the specified mode */
	else
		cpsr = mode;

	retval = dpm->instr_write_data_r0(dpm, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr);
	if (retval != ERROR_OK)
		return retval;

	if (dpm->instr_cpsr_sync)
		retval = dpm->instr_cpsr_sync(dpm);

	return retval;
}

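/*
 * Illustrative sketch only: callers that switch to a temporary mode must
 * switch back to ARM_MODE_ANY before returning, per the rule stated above.
 * example_read_fiq_sp() is a hypothetical helper, not part of this file.
 */
#if 0
static int example_read_fiq_sp(struct arm_dpm *dpm, struct reg *r)
{
	int retval = arm_dpm_modeswitch(dpm, ARM_MODE_FIQ);

	if (retval != ERROR_OK)
		return retval;

	/* read r13/sp as seen from FIQ mode */
	retval = arm_dpm_read_reg(dpm, r, 13);

	/* always restore the recorded core mode, even on error */
	/* (void) */ arm_dpm_modeswitch(dpm, ARM_MODE_ANY);
	return retval;
}
#endif
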
/* Read 64bit VFP registers */
static int dpm_read_reg_u64(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval = ERROR_FAIL;
	uint32_t value_r0, value_r1;

	switch (regnum) {
		case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
			/* move from double word register to r0:r1: "vmov r0, r1, vm"
			 * then read r0 via dcc
			 */
			retval = dpm->instr_read_data_r0(dpm,
				ARMV4_5_VMOV(1, 1, 0, ((regnum - ARM_VFP_V3_D0) >> 4),
				((regnum - ARM_VFP_V3_D0) & 0xf)), &value_r0);
			/* read r1 via dcc */
			retval = dpm->instr_read_data_dcc(dpm,
				ARMV4_5_MCR(14, 0, 1, 0, 5, 0),
				&value_r1);
			break;
		default:
			break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value_r0);
		buf_set_u32(r->value + 4, 0, 32, value_r1);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x, %8.8x", r->name,
			(unsigned) value_r0, (unsigned) value_r1);
	}

	return retval;
}

/* just read the register -- rely on the core mode being right */
int arm_dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	uint32_t value;
	int retval;

	switch (regnum) {
		case 0 ... 14:
			/* return via DCC:  "MCR p14, 0, Rnum, c0, c5, 0" */
			retval = dpm->instr_read_data_dcc(dpm,
				ARMV4_5_MCR(14, 0, regnum, 0, 5, 0),
				&value);
			break;
		case 15:	/* PC
			 * "MOV r0, pc"; then return via DCC */
			retval = dpm->instr_read_data_r0(dpm, 0xe1a0000f, &value);

			/* NOTE: this seems like a slightly awkward place to update
			 * this value ... but if the PC gets written (the only way
			 * to change what we compute), the arch spec says subsequent
			 * reads return values which are "unpredictable".  So this
			 * is always right except in those broken-by-intent cases.
			 */
			switch (dpm->arm->core_state) {
				case ARM_STATE_ARM:
					value -= 8;
					break;
				case ARM_STATE_THUMB:
				case ARM_STATE_THUMB_EE:
					value -= 4;
					break;
				case ARM_STATE_JAZELLE:
					/* core-specific ... ? */
					LOG_WARNING("Jazelle PC adjustment unknown");
					break;
				default:
					LOG_WARNING("unknown core state");
					break;
			}
			break;
		case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
			return dpm_read_reg_u64(dpm, r, regnum);
		case ARM_VFP_V3_FPSCR:
			/* "VMRS r0, FPSCR"; then return via DCC */
			retval = dpm->instr_read_data_r0(dpm,
				ARMV4_5_VMRS(0), &value);
			break;
		default:
			/* 16: "MRS r0, CPSR"; then return via DCC
			 * 17: "MRS r0, SPSR"; then return via DCC
			 */
			retval = dpm->instr_read_data_r0(dpm,
				ARMV4_5_MRS(0, regnum & 1),
				&value);
			break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}

/* Write 64bit VFP registers */
static int dpm_write_reg_u64(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval = ERROR_FAIL;
	uint32_t value_r0 = buf_get_u32(r->value, 0, 32);
	uint32_t value_r1 = buf_get_u32(r->value + 4, 0, 32);

	switch (regnum) {
		case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
			/* write value_r1 to r1 via dcc */
			retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 1, 0, 5, 0),
				value_r1);
			/* write value_r0 to r0 via dcc then,
			 * move to double word register from r0:r1: "vmov vm, r0, r1"
			 */
			retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_VMOV(0, 1, 0, ((regnum - ARM_VFP_V3_D0) >> 4),
				((regnum - ARM_VFP_V3_D0) & 0xf)), value_r0);
			break;
		default:
			break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x, %8.8x", r->name,
			(unsigned) value_r0, (unsigned) value_r1);
	}

	return retval;
}

/* just write the register -- rely on the core mode being right */
static int dpm_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval;
	uint32_t value = buf_get_u32(r->value, 0, 32);

	switch (regnum) {
		case 0 ... 14:
			/* load register from DCC:  "MRC p14, 0, Rnum, c0, c5, 0" */
			retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, regnum, 0, 5, 0),
				value);
			break;
		case 15:	/* PC
			 * read r0 from DCC; then "MOV pc, r0" */
			retval = dpm->instr_write_data_r0(dpm, 0xe1a0f000, value);
			break;
		case ARM_VFP_V3_D0 ... ARM_VFP_V3_D31:
			return dpm_write_reg_u64(dpm, r, regnum);
		case ARM_VFP_V3_FPSCR:
			/* move to r0 from DCC, then "VMSR FPSCR, r0" */
			retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_VMSR(0), value);
			break;
		default:
			/* 16: read r0 from DCC, then "MSR r0, CPSR_cxsf"
			 * 17: read r0 from DCC, then "MSR r0, SPSR_cxsf"
			 */
			retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MSR_GP(0, 0xf, regnum & 1),
				value);
			if (retval != ERROR_OK)
				break;

			if (regnum == 16 && dpm->instr_cpsr_sync)
				retval = dpm->instr_cpsr_sync(dpm);

			break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}

/**
 * Write to program counter and switch the core state (arm/thumb) according to
 * the address.
 */
static int dpm_write_pc_core_state(struct arm_dpm *dpm, struct reg *r)
{
	uint32_t value = buf_get_u32(r->value, 0, 32);

	/* read r0 from DCC; then "BX r0" */
	return dpm->instr_write_data_r0(dpm, ARMV4_5_BX(0), value);
}

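/*
 * Background sketch (general ARM behavior, stated here as an assumption for
 * illustration): BX takes its new state from bit [0] of the target address,
 * which is why writing the PC through "BX r0" also restores the core state.
 * Core-specific resume code prepares the cached PC accordingly.
 */
#if 0
static void example_set_resume_address(struct reg *pc, uint32_t addr, bool thumb)
{
	/* bit [0] set -> Thumb after BX; clear -> ARM */
	buf_set_u32(pc->value, 0, 32, thumb ? (addr | 1u) : (addr & ~1u));
	pc->valid = true;
	pc->dirty = true;
}
#endif
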
/**
 * Read basic registers of the current context:  R0 to R15, and CPSR;
 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
 * In normal operation this is called on entry to halting debug state,
 * possibly after some other operations supporting restore of debug state
 * or making sure the CPU is fully idle (drain write buffer, etc).
 */
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	uint32_t cpsr;
	int retval;
	struct reg *r;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* read R0 and R1 first (they're used for scratch), then CPSR */
	for (unsigned i = 0; i < 2; i++) {
		r = arm->core_cache->reg_list + i;
		if (!r->valid) {
			retval = arm_dpm_read_reg(dpm, r, i);
			if (retval != ERROR_OK)
				goto fail;
		}
		r->dirty = true;
	}

	retval = dpm->instr_read_data_r0(dpm, ARMV4_5_MRS(0, 0), &cpsr);
	if (retval != ERROR_OK)
		goto fail;

	/* update core mode and state, plus shadow mapping for R8..R14 */
	arm_set_cpsr(arm, cpsr);

	/* REVISIT we can probably avoid reading R1..R14, saving time... */
	for (unsigned i = 2; i < 16; i++) {
		r = arm_reg_current(arm, i);
		if (r->valid)
			continue;

		retval = arm_dpm_read_reg(dpm, r, i);
		if (retval != ERROR_OK)
			goto fail;
	}

	/* NOTE: SPSR ignored (if it's even relevant). */

	/* REVISIT the debugger can trigger various exceptions.  See the
	 * ARMv7A architecture spec, section C5.7, for more info about
	 * what defenses are needed; v6 debug has the most issues.
	 */

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}

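/*
 * Illustrative pairing (sketch, simplified from how core drivers use these
 * helpers): registers are snapshotted on debug entry and flushed back just
 * before resuming.  example_halt_and_resume() is hypothetical.
 */
#if 0
static int example_halt_and_resume(struct arm_dpm *dpm)
{
	int retval;

	/* on entry to halting debug state */
	retval = arm_dpm_read_current_registers(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* ... the debugger inspects and modifies the register cache ... */

	/* on exit, write back dirty registers and re-arm break/watchpoints */
	return arm_dpm_write_dirty_registers(dpm, true);
}
#endif
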
/* Avoid needless I/O ... leave breakpoints and watchpoints alone
 * unless they're removed, or need updating because of single-stepping
 * or running debugger code.
 */
static int dpm_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
	struct dpm_bpwp *xp, int *set_p)
{
	int retval = ERROR_OK;
	bool disable;

	if (!set_p) {
		if (!xp->dirty)
			goto done;
		xp->dirty = false;
		/* removed or startup; we must disable it */
		disable = true;
	} else if (bpwp) {
		if (!xp->dirty)
			goto done;
		/* disabled, but we must set it */
		xp->dirty = disable = false;
	} else {
		if (!*set_p)
			goto done;
		/* set, but we must temporarily disable it */
		xp->dirty = disable = true;
	}

	if (disable)
		retval = dpm->bpwp_disable(dpm, xp->number);
	else
		retval = dpm->bpwp_enable(dpm, xp->number,
			xp->address, xp->control);

	if (retval != ERROR_OK)
		LOG_ERROR("%s: can't %s HW %spoint %d",
			target_name(dpm->arm->target),
			disable ? "disable" : "enable",
			(xp->number < 16) ? "break" : "watch",
			xp->number & 0xf);
done:
	return retval;
}

static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp);

/**
 * Writes all modified core registers for all processor modes.  In normal
 * operation this is called on exit from halting debug state.
 *
 * @param dpm: represents the processor
 * @param bpwp: true ensures breakpoints and watchpoints are set,
 *	false ensures they are cleared
 */
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
{
	struct arm *arm = dpm->arm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_write;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* If we're managing hardware breakpoints for this core, enable
	 * or disable them as requested.
	 *
	 * REVISIT We don't yet manage them for ANY cores.  Eventually
	 * we should be able to assume we handle them; but until then,
	 * cope with the hand-crafted breakpoint code.
	 */
	if (arm->target->type->add_breakpoint == dpm_add_breakpoint) {
		for (unsigned i = 0; i < dpm->nbp; i++) {
			struct dpm_bp *dbp = dpm->dbp + i;
			struct breakpoint *bp = dbp->bp;

			retval = dpm_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
				bp ? &bp->set : NULL);
			if (retval != ERROR_OK)
				goto done;
		}
	}

	/* enable/disable watchpoints */
	for (unsigned i = 0; i < dpm->nwp; i++) {
		struct dpm_wp *dwp = dpm->dwp + i;
		struct watchpoint *wp = dwp->wp;

		retval = dpm_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
			wp ? &wp->set : NULL);
		if (retval != ERROR_OK)
			goto done;
	}

	/* NOTE: writes to breakpoint and watchpoint registers might
	 * be queued, and need (efficient/batched) flushing later.
	 */

	/* Scan the registers until we find one that's both dirty and
	 * eligible for flushing.  Flush that and everything else that
	 * shares the same core mode setting.  Typically this won't
	 * actually find anything to do...
	 */
	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_write = false;

		/* check everything except our scratch registers R0 and R1 */
		for (unsigned i = 2; i < cache->num_regs; i++) {
			struct arm_reg *r;
			unsigned regnum;

			/* also skip PC, CPSR, and non-dirty registers */
			if (i == 15)
				continue;
			if (arm->cpsr == cache->reg_list + i)
				continue;
			if (!cache->reg_list[i].dirty)
				continue;

			r = cache->reg_list[i].arch_info;
			regnum = r->num;

			/* may need to pick and set a mode */
			if (!did_write) {
				enum arm_mode tmode;

				did_write = true;
				mode = tmode = r->mode;

				/* cope with special cases */
				switch (regnum) {
					case 8 ... 12:
						/* r8..r12 "anything but FIQ" case;
						 * we "know" core mode is accurate
						 * since we haven't changed it yet
						 */
						if (arm->core_mode == ARM_MODE_FIQ
							&& ARM_MODE_ANY != mode)
							tmode = ARM_MODE_USR;
						break;
					case 16:
						/* SPSR */
						regnum++;
						break;
				}

				/* REVISIT error checks */
				if (tmode != ARM_MODE_ANY) {
					retval = arm_dpm_modeswitch(dpm, tmode);
					if (retval != ERROR_OK)
						goto done;
				}
			}
			if (r->mode != mode)
				continue;

			retval = dpm_write_reg(dpm,
				&cache->reg_list[i],
				regnum);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_write);

	/* Restore original CPSR ... assuming either that we changed it,
	 * or it's dirty.  Must write PC to ensure the return address is
	 * defined, and must not write it before CPSR.
	 */
	retval = arm_dpm_modeswitch(dpm, ARM_MODE_ANY);
	if (retval != ERROR_OK)
		goto done;
	arm->cpsr->dirty = false;

	/* restore the PC, make sure to also switch the core state
	 * to whatever it was set to with the "arm core_state" command.
	 * target code will have set PC to an appropriate resume address.
	 */
	retval = dpm_write_pc_core_state(dpm, arm->pc);
	if (retval != ERROR_OK)
		goto done;
	/* On Cortex-A5 (as found on the NXP VF610 SoC), a BX instruction
	 * executed in debug state doesn't appear to set the PC, so set it
	 * explicitly with "MOV pc, r0".  This doesn't influence CPSR on
	 * Cortex-A9, so it should be OK; a different debug HW IP revision
	 * may be the reason, but that is not certain. */
	retval = dpm_write_reg(dpm, arm->pc, 15);
	if (retval != ERROR_OK)
		goto done;
	arm->pc->dirty = false;

	/* flush R0 and R1 (our scratch registers) */
	for (unsigned i = 0; i < 2; i++) {
		retval = dpm_write_reg(dpm, &cache->reg_list[i], i);
		if (retval != ERROR_OK)
			goto done;
		cache->reg_list[i].dirty = false;
	}

	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}

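/*
 * Sketch of how the bpwp flag is meant to be used (assumption drawn from the
 * comment above, not copied from any core driver): pass true when resuming
 * normally, so hardware breakpoints and watchpoints get (re)installed, and
 * false when the debugger itself runs code on the target and must not trip
 * them (single-stepping, target algorithms).
 */
#if 0
static int example_restore_context(struct target *target, bool resuming)
{
	struct arm *arm = target_to_arm(target);

	return arm_dpm_write_dirty_registers(arm->dpm, resuming);
}
#endif
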
/* Returns ARM_MODE_ANY or temporary mode to use while reading the
 * specified register ... works around flakiness from ARM core calls.
 * Caller already filtered out SPSR access; mode is never MODE_SYS
 * or MODE_ANY.
 */
static enum arm_mode dpm_mapmode(struct arm *arm,
	unsigned num, enum arm_mode mode)
{
	enum arm_mode amode = arm->core_mode;

	/* don't switch if the mode is already correct */
	if (amode == ARM_MODE_SYS)
		amode = ARM_MODE_USR;
	if (mode == amode)
		return ARM_MODE_ANY;

	switch (num) {
		/* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
		case 0 ... 7:
		case 15:
		case 16:
			break;
		/* r8..r12 aren't shadowed for anything except FIQ */
		case 8 ... 12:
			if (mode == ARM_MODE_FIQ)
				return mode;
			break;
		/* r13/sp, and r14/lr are always shadowed */
		case 13:
		case 14:
		case ARM_VFP_V3_D0 ... ARM_VFP_V3_FPSCR:
			return mode;
		default:
			LOG_WARNING("invalid register #%u", num);
			break;
	}
	return ARM_MODE_ANY;
}

/*
 * Standard ARM register accessors ... there are three methods
 * in "struct arm", to support individual read/write and bulk read
 * of registers.
 */

static int arm_dpm_read_core_reg(struct target *target, struct reg *r,
	int regnum, enum arm_mode mode)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || (regnum > 16 && regnum < ARM_VFP_V3_D0) ||
		(regnum > ARM_VFP_V3_FPSCR))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to read SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = arm_dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = arm_dpm_read_reg(dpm, r, regnum);
	if (retval != ERROR_OK)
		goto fail;
	/* always clean up, regardless of error */

fail:
	if (mode != ARM_MODE_ANY)
		/* (void) */ arm_dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);

	return retval;
}

static int arm_dpm_write_core_reg(struct target *target, struct reg *r,
	int regnum, enum arm_mode mode, uint8_t *value)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || (regnum > 16 && regnum < ARM_VFP_V3_D0) ||
		(regnum > ARM_VFP_V3_FPSCR))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to write SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = arm_dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_write_reg(dpm, r, regnum);
	/* always clean up, regardless of error */

fail:
	if (mode != ARM_MODE_ANY)
		/* (void) */ arm_dpm_modeswitch(dpm, ARM_MODE_ANY);

	/* (void) */ dpm->finish(dpm);

	return retval;
}

static int arm_dpm_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_read;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_read = false;

		/* We "know" arm_dpm_read_current_registers() was called so
		 * the unmapped registers (R0..R7, PC, AND CPSR) and some
		 * view of R8..R14 are current.  We also "know" oddities of
		 * register mapping: special cases for R8..R12 and SPSR.
		 *
		 * Pick some mode with unread registers and read them all.
		 * Repeat until done.
		 */
		for (unsigned i = 0; i < cache->num_regs; i++) {
			struct arm_reg *r;

			if (cache->reg_list[i].valid)
				continue;
			r = cache->reg_list[i].arch_info;

			/* may need to pick a mode and set CPSR */
			if (!did_read) {
				did_read = true;
				mode = r->mode;

				/* For regular (ARM_MODE_ANY) R8..R12,
				 * in case we've entered debug state
				 * in FIQ mode we need to patch the mode.
				 */
				if (mode != ARM_MODE_ANY)
					retval = arm_dpm_modeswitch(dpm, mode);
				else
					retval = arm_dpm_modeswitch(dpm, ARM_MODE_USR);

				if (retval != ERROR_OK)
					goto done;
			}
			if (r->mode != mode)
				continue;

			/* CPSR was read, so "R16" must mean SPSR */
			retval = arm_dpm_read_reg(dpm,
				&cache->reg_list[i],
				(r->num == 16) ? 17 : r->num);
			if (retval != ERROR_OK)
				goto done;
		}

	} while (did_read);

	retval = arm_dpm_modeswitch(dpm, ARM_MODE_ANY);
	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}

/*----------------------------------------------------------------------*/

/*
 * Breakpoint and Watchpoint support.
 *
 * Hardware {break,watch}points are usually left active, to minimize
 * debug entry/exit costs.  When they are set or cleared, it's done in
 * batches.  Also, DPM-conformant hardware can update debug registers
 * regardless of whether the CPU is running or halted ... though that
 * fact isn't currently leveraged.
 */

static int dpm_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
	uint32_t addr, uint32_t length)
{
	uint32_t control;

	control = (1 << 0)	/* enable */
		| (3 << 1);	/* both user and privileged access */

	/* Match 1, 2, or all 4 byte addresses in this word.
	 *
	 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
	 * Support larger lengths, when addr is suitably aligned.  In
	 * particular, allow watchpoints on 8 byte "double" values.
	 *
	 * REVISIT allow watchpoints on unaligned 2-byte values; and on
	 * v7 hardware, unaligned 4-byte ones too.
	 */
	switch (length) {
		case 1:
			control |= (1 << (addr & 3)) << 5;
			break;
		case 2:
			/* require 2-byte alignment */
			if (!(addr & 1)) {
				control |= (3 << (addr & 2)) << 5;
				break;
			}
			/* FALL THROUGH */
		case 4:
			/* require 4-byte alignment */
			if (!(addr & 3)) {
				control |= 0xf << 5;
				break;
			}
			/* FALL THROUGH */
		default:
			LOG_ERROR("unsupported {break,watch}point length/alignment");
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* other shared control bits:
	 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
	 * bit 20 == 0 ... not linked to a context ID
	 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
	 */

	xp->address = addr & ~3;
	xp->control = control;
	xp->dirty = true;

	LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
		xp->address, control, xp->number);

	/* hardware is updated in write_dirty_registers() */
	return ERROR_OK;
}

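/*
 * Worked example (illustrative only): a 2-byte breakpoint at 0x8002 is
 * encoded as a word-aligned address plus a byte-address-select mask in
 * bits [8:5] of the control word.
 */
#if 0
static void example_bpwp_encoding(struct arm_dpm *dpm)
{
	struct dpm_bpwp example = { .number = 0 };

	dpm_bpwp_setup(dpm, &example, 0x8002, 2);
	/* now: example.address == 0x8000
	 *      example.control == (1 << 0)	enable
	 *			 | (3 << 1)	user + privileged
	 *			 | (0x3 << 7)	match the upper halfword
	 *			 == 0x187
	 */
}
#endif
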
static int dpm_add_breakpoint(struct target *target, struct breakpoint *bp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (bp->length < 2)
		return ERROR_COMMAND_SYNTAX_ERROR;
	if (!dpm->bpwp_enable)
		return retval;

	/* FIXME we need a generic solution for software breakpoints. */
	if (bp->type == BKPT_SOFT)
		LOG_DEBUG("using HW bkpt, not SW...");

	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (!dpm->dbp[i].bp) {
			retval = dpm_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
				bp->address, bp->length);
			if (retval == ERROR_OK)
				dpm->dbp[i].bp = bp;
			break;
		}
	}

	return retval;
}

*target
, struct breakpoint
*bp
)
902 struct arm
*arm
= target_to_arm(target
);
903 struct arm_dpm
*dpm
= arm
->dpm
;
904 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
906 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
907 if (dpm
->dbp
[i
].bp
== bp
) {
908 dpm
->dbp
[i
].bp
= NULL
;
909 dpm
->dbp
[i
].bpwp
.dirty
= true;
911 /* hardware is updated in write_dirty_registers() */
static int dpm_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
	struct watchpoint *wp)
{
	int retval;
	struct dpm_wp *dwp = dpm->dwp + index_t;
	uint32_t control;

	/* this hardware doesn't support data value matching or masking */
	if (wp->value || wp->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint values and masking not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	retval = dpm_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
	if (retval != ERROR_OK)
		return retval;

	control = dwp->bpwp.control;
	switch (wp->rw) {
		case WPT_READ:
			control |= 1 << 3;
			break;
		case WPT_WRITE:
			control |= 2 << 3;
			break;
		case WPT_ACCESS:
			control |= 3 << 3;
			break;
	}
	dwp->bpwp.control = control;

	dpm->dwp[index_t].wp = wp;

	return retval;
}

static int dpm_add_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (dpm->bpwp_enable) {
		for (unsigned i = 0; i < dpm->nwp; i++) {
			if (!dpm->dwp[i].wp) {
				retval = dpm_watchpoint_setup(dpm, i, wp);
				break;
			}
		}
	}

	return retval;
}

static int dpm_remove_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	for (unsigned i = 0; i < dpm->nwp; i++) {
		if (dpm->dwp[i].wp == wp) {
			dpm->dwp[i].wp = NULL;
			dpm->dwp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}

void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
{
	switch (dpm->arm->core_state) {
		case ARM_STATE_ARM:
			addr -= 8;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			addr -= 4;
			break;
		case ARM_STATE_JAZELLE:
		case ARM_STATE_AARCH64:
			/* ?? */
			break;
	}
	dpm->wp_pc = addr;
}

/*----------------------------------------------------------------------*/

/*
 * Other debug and support utilities
 */

void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
{
	struct target *target = dpm->arm->target;

	dpm->dscr = dscr;

	/* Examine debug reason */
	switch (DSCR_ENTRY(dscr)) {
		case DSCR_ENTRY_HALT_REQ:	/* HALT request from debugger */
		case DSCR_ENTRY_EXT_DBG_REQ:	/* EDBGRQ */
			target->debug_reason = DBG_REASON_DBGRQ;
			break;
		case DSCR_ENTRY_BREAKPOINT:	/* HW breakpoint */
		case DSCR_ENTRY_BKPT_INSTR:	/* vector catch */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			break;
		case DSCR_ENTRY_IMPRECISE_WATCHPT:	/* asynch watchpoint */
		case DSCR_ENTRY_PRECISE_WATCHPT:	/* precise watchpoint */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			break;
		default:
			target->debug_reason = DBG_REASON_UNDEFINED;
			break;
	}
}

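/*
 * Sketch (assumption, not copied from any core driver): on debug entry a
 * core driver reads DSCR through its own transport and feeds it to
 * arm_dpm_report_dscr() so target->debug_reason is set before the halt
 * event is reported.  example_read_dscr() is a hypothetical helper.
 */
#if 0
static void example_report_debug_entry(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	uint32_t dscr;

	if (example_read_dscr(target, &dscr) == ERROR_OK)
		arm_dpm_report_dscr(arm->dpm, dscr);
}
#endif
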
/*----------------------------------------------------------------------*/

/*
 * Setup and management support.
 */

/**
 * Hooks up this DPM to its associated target; call only once.
 * Initially this only covers the register cache.
 *
 * Oh, and watchpoints.  Yeah.
 */
int arm_dpm_setup(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	struct target *target = arm->target;
	struct reg_cache *cache = 0;

	arm->dpm = dpm;

	/* register access setup */
	arm->full_context = arm_dpm_full_context;
	arm->read_core_reg = arm_dpm_read_core_reg;
	arm->write_core_reg = arm_dpm_write_core_reg;

	if (arm->core_cache == NULL) {
		cache = arm_build_reg_cache(target, arm);
		if (!cache)
			return ERROR_FAIL;

		*register_get_last_cache_p(&target->reg_cache) = cache;
	}

	/* coprocessor access setup */
	arm->mrc = dpm_mrc;
	arm->mcr = dpm_mcr;

	/* breakpoint setup -- optional until it works everywhere */
	if (!target->type->add_breakpoint) {
		target->type->add_breakpoint = dpm_add_breakpoint;
		target->type->remove_breakpoint = dpm_remove_breakpoint;
	}

	/* watchpoint setup */
	target->type->add_watchpoint = dpm_add_watchpoint;
	target->type->remove_watchpoint = dpm_remove_watchpoint;

	/* FIXME add vector catch support */

	dpm->nbp = 1 + ((dpm->didr >> 24) & 0xf);
	dpm->nwp = 1 + ((dpm->didr >> 28) & 0xf);
	dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
	dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);

	if (!dpm->dbp || !dpm->dwp) {
		free(dpm->dbp);
		free(dpm->dwp);
		return ERROR_FAIL;
	}

	LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
		target_name(target), dpm->nbp, dpm->nwp);

	/* REVISIT ... and some of those breakpoints could match
	 * execution context IDs...
	 */

	return ERROR_OK;
}

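/*
 * Sketch of the intended call sequence (assumption based on the comments in
 * this file, not a verbatim copy of any core driver): the driver fills in
 * the DPM hooks and dpm->didr, calls arm_dpm_setup() once at examine time,
 * then arm_dpm_initialize() at the start of each debug session.
 */
#if 0
static int example_examine(struct arm_dpm *dpm)
{
	int retval;

	/* prepare/finish, instr_*, bpwp_* hooks and dpm->arm/dpm->didr
	 * must already be filled in by the core driver */
	retval = arm_dpm_setup(dpm);
	if (retval != ERROR_OK)
		return retval;

	return arm_dpm_initialize(dpm);
}
#endif
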
/**
 * Reinitializes DPM state at the beginning of a new debug session
 * or after a reset which may have affected the debug module.
 */
int arm_dpm_initialize(struct arm_dpm *dpm)
{
	/* Disable all breakpoints and watchpoints at startup. */
	if (dpm->bpwp_disable) {
		unsigned i;

		for (i = 0; i < dpm->nbp; i++) {
			dpm->dbp[i].bpwp.number = i;
			(void) dpm->bpwp_disable(dpm, i);
		}
		for (i = 0; i < dpm->nwp; i++) {
			dpm->dwp[i].bpwp.number = 16 + i;
			(void) dpm->bpwp_disable(dpm, 16 + i);
		}
	} else
		LOG_WARNING("%s: can't disable breakpoints and watchpoints",
			target_name(dpm->arm->target));

	return ERROR_OK;
}