ipdbg: fix double free of virtual-ir data
[openocd.git] / src / target / cortex_m.c
blobc225b1aa9d2c0e88f4cec406c7c1e07649e16e02
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support. (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * any longer.
 */
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
50 /* Supported Cortex-M Cores */
51 static const struct cortex_m_part_info cortex_m_parts[] = {
53 .impl_part = CORTEX_M0_PARTNO,
54 .name = "Cortex-M0",
55 .arch = ARM_ARCH_V6M,
58 .impl_part = CORTEX_M0P_PARTNO,
59 .name = "Cortex-M0+",
60 .arch = ARM_ARCH_V6M,
63 .impl_part = CORTEX_M1_PARTNO,
64 .name = "Cortex-M1",
65 .arch = ARM_ARCH_V6M,
68 .impl_part = CORTEX_M3_PARTNO,
69 .name = "Cortex-M3",
70 .arch = ARM_ARCH_V7M,
71 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
74 .impl_part = CORTEX_M4_PARTNO,
75 .name = "Cortex-M4",
76 .arch = ARM_ARCH_V7M,
77 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
80 .impl_part = CORTEX_M7_PARTNO,
81 .name = "Cortex-M7",
82 .arch = ARM_ARCH_V7M,
83 .flags = CORTEX_M_F_HAS_FPV5,
86 .impl_part = CORTEX_M23_PARTNO,
87 .name = "Cortex-M23",
88 .arch = ARM_ARCH_V8M,
91 .impl_part = CORTEX_M33_PARTNO,
92 .name = "Cortex-M33",
93 .arch = ARM_ARCH_V8M,
94 .flags = CORTEX_M_F_HAS_FPV5,
97 .impl_part = CORTEX_M35P_PARTNO,
98 .name = "Cortex-M35P",
99 .arch = ARM_ARCH_V8M,
100 .flags = CORTEX_M_F_HAS_FPV5,
103 .impl_part = CORTEX_M55_PARTNO,
104 .name = "Cortex-M55",
105 .arch = ARM_ARCH_V8M,
106 .flags = CORTEX_M_F_HAS_FPV5,
109 .impl_part = CORTEX_M85_PARTNO,
110 .name = "Cortex-M85",
111 .arch = ARM_ARCH_V8M,
112 .flags = CORTEX_M_F_HAS_FPV5,
115 .impl_part = STAR_MC1_PARTNO,
116 .name = "STAR-MC1",
117 .arch = ARM_ARCH_V8M,
118 .flags = CORTEX_M_F_HAS_FPV5,
121 .impl_part = INFINEON_SLX2_PARTNO,
122 .name = "Infineon-SLx2",
123 .arch = ARM_ARCH_V8M,
126 .impl_part = REALTEK_M200_PARTNO,
127 .name = "Real-M200 (KM0)",
128 .arch = ARM_ARCH_V8M,
131 .impl_part = REALTEK_M300_PARTNO,
132 .name = "Real-M300 (KM4)",
133 .arch = ARM_ARCH_V8M,
134 .flags = CORTEX_M_F_HAS_FPV5,
138 /* forward declarations */
139 static int cortex_m_store_core_reg_u32(struct target *target,
140 uint32_t num, uint32_t value);
141 static void cortex_m_dwt_free(struct target *target);
143 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
144 * on a read. Call this helper function each time DHCSR is read
145 * to preserve S_RESET_ST state in case of a reset event was detected.
147 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
148 uint32_t dhcsr)
150 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
153 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
154 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
156 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
158 struct cortex_m_common *cortex_m = target_to_cm(target);
159 struct armv7m_common *armv7m = target_to_armv7m(target);
161 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
162 &cortex_m->dcb_dhcsr);
163 if (retval != ERROR_OK)
164 return retval;
166 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
167 return ERROR_OK;
170 static int cortex_m_load_core_reg_u32(struct target *target,
171 uint32_t regsel, uint32_t *value)
173 struct cortex_m_common *cortex_m = target_to_cm(target);
174 struct armv7m_common *armv7m = target_to_armv7m(target);
175 int retval;
176 uint32_t dcrdr, tmp_value;
177 int64_t then;
179 /* because the DCB_DCRDR is used for the emulated dcc channel
180 * we have to save/restore the DCB_DCRDR when used */
181 if (target->dbg_msg_enabled) {
182 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
183 if (retval != ERROR_OK)
184 return retval;
187 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
188 if (retval != ERROR_OK)
189 return retval;
191 /* check if value from register is ready and pre-read it */
192 then = timeval_ms();
193 while (1) {
194 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
195 &cortex_m->dcb_dhcsr);
196 if (retval != ERROR_OK)
197 return retval;
198 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
199 &tmp_value);
200 if (retval != ERROR_OK)
201 return retval;
202 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
203 if (cortex_m->dcb_dhcsr & S_REGRDY)
204 break;
205 cortex_m->slow_register_read = true; /* Polling (still) needed. */
206 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
207 LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
208 return ERROR_TIMEOUT_REACHED;
210 keep_alive();
213 *value = tmp_value;
215 if (target->dbg_msg_enabled) {
216 /* restore DCB_DCRDR - this needs to be in a separate
217 * transaction otherwise the emulated DCC channel breaks */
218 if (retval == ERROR_OK)
219 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
222 return retval;
225 static int cortex_m_slow_read_all_regs(struct target *target)
227 struct cortex_m_common *cortex_m = target_to_cm(target);
228 struct armv7m_common *armv7m = target_to_armv7m(target);
229 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
231 /* Opportunistically restore fast read, it'll revert to slow
232 * if any register needed polling in cortex_m_load_core_reg_u32(). */
233 cortex_m->slow_register_read = false;
235 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
236 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
237 if (r->exist) {
238 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
239 if (retval != ERROR_OK)
240 return retval;
244 if (!cortex_m->slow_register_read)
245 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
247 return ERROR_OK;
250 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
251 uint32_t *reg_value, uint32_t *dhcsr)
253 struct armv7m_common *armv7m = target_to_armv7m(target);
254 int retval;
256 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
257 if (retval != ERROR_OK)
258 return retval;
260 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
261 if (retval != ERROR_OK)
262 return retval;
264 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
267 static int cortex_m_fast_read_all_regs(struct target *target)
269 struct cortex_m_common *cortex_m = target_to_cm(target);
270 struct armv7m_common *armv7m = target_to_armv7m(target);
271 int retval;
272 uint32_t dcrdr;
274 /* because the DCB_DCRDR is used for the emulated dcc channel
275 * we have to save/restore the DCB_DCRDR when used */
276 if (target->dbg_msg_enabled) {
277 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
278 if (retval != ERROR_OK)
279 return retval;
282 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
283 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
284 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
285 /* we need one 32-bit word for each register except FP D0..D15, which
286 * need two words */
287 uint32_t r_vals[n_r32];
288 uint32_t dhcsr[n_r32];
290 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
291 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
292 for (reg_id = 0; reg_id < num_regs; reg_id++) {
293 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
294 if (!r->exist)
295 continue; /* skip non existent registers */
297 if (r->size <= 8) {
298 /* Any 8-bit or shorter register is unpacked from a 32-bit
299 * container register. Skip it now. */
300 continue;
303 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
304 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
305 &dhcsr[wi]);
306 if (retval != ERROR_OK)
307 return retval;
308 wi++;
310 assert(r->size == 32 || r->size == 64);
311 if (r->size == 32)
312 continue; /* done with 32-bit register */
314 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
315 /* the odd part of FP register (S1, S3...) */
316 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
317 &dhcsr[wi]);
318 if (retval != ERROR_OK)
319 return retval;
320 wi++;
323 assert(wi <= n_r32);
325 retval = dap_run(armv7m->debug_ap->dap);
326 if (retval != ERROR_OK)
327 return retval;
329 if (target->dbg_msg_enabled) {
330 /* restore DCB_DCRDR - this needs to be in a separate
331 * transaction otherwise the emulated DCC channel breaks */
332 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
333 if (retval != ERROR_OK)
334 return retval;
337 bool not_ready = false;
338 for (unsigned int i = 0; i < wi; i++) {
339 if ((dhcsr[i] & S_REGRDY) == 0) {
340 not_ready = true;
341 LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
343 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
346 if (not_ready) {
347 /* Any register was not ready,
348 * fall back to slow read with S_REGRDY polling */
349 return ERROR_TIMEOUT_REACHED;
352 LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);
354 unsigned int ri = 0; /* read index from r_vals array */
355 for (reg_id = 0; reg_id < num_regs; reg_id++) {
356 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
357 if (!r->exist)
358 continue; /* skip non existent registers */
360 r->dirty = false;
362 unsigned int reg32_id;
363 uint32_t offset;
364 if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
365 /* Unpack a partial register from 32-bit container register */
366 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
368 /* The container register ought to precede all regs unpacked
369 * from it in the reg_list. So the value should be ready
370 * to unpack */
371 assert(r32->valid);
372 buf_cpy(r32->value + offset, r->value, r->size);
374 } else {
375 assert(r->size == 32 || r->size == 64);
376 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
378 if (r->size == 64) {
379 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
380 /* the odd part of FP register (S1, S3...) */
381 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
384 r->valid = true;
386 assert(ri == wi);
388 return retval;
391 static int cortex_m_store_core_reg_u32(struct target *target,
392 uint32_t regsel, uint32_t value)
394 struct cortex_m_common *cortex_m = target_to_cm(target);
395 struct armv7m_common *armv7m = target_to_armv7m(target);
396 int retval;
397 uint32_t dcrdr;
398 int64_t then;
400 /* because the DCB_DCRDR is used for the emulated dcc channel
401 * we have to save/restore the DCB_DCRDR when used */
402 if (target->dbg_msg_enabled) {
403 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
404 if (retval != ERROR_OK)
405 return retval;
408 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
409 if (retval != ERROR_OK)
410 return retval;
412 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
413 if (retval != ERROR_OK)
414 return retval;
416 /* check if value is written into register */
417 then = timeval_ms();
418 while (1) {
419 retval = cortex_m_read_dhcsr_atomic_sticky(target);
420 if (retval != ERROR_OK)
421 return retval;
422 if (cortex_m->dcb_dhcsr & S_REGRDY)
423 break;
424 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
425 LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
426 return ERROR_TIMEOUT_REACHED;
428 keep_alive();
431 if (target->dbg_msg_enabled) {
432 /* restore DCB_DCRDR - this needs to be in a separate
433 * transaction otherwise the emulated DCC channel breaks */
434 if (retval == ERROR_OK)
435 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
438 return retval;
441 static int cortex_m_write_debug_halt_mask(struct target *target,
442 uint32_t mask_on, uint32_t mask_off)
444 struct cortex_m_common *cortex_m = target_to_cm(target);
445 struct armv7m_common *armv7m = &cortex_m->armv7m;
447 /* mask off status bits */
448 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
449 /* create new register mask */
450 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
452 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
455 static int cortex_m_set_maskints(struct target *target, bool mask)
457 struct cortex_m_common *cortex_m = target_to_cm(target);
458 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
459 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
460 else
461 return ERROR_OK;
464 static int cortex_m_set_maskints_for_halt(struct target *target)
466 struct cortex_m_common *cortex_m = target_to_cm(target);
467 switch (cortex_m->isrmasking_mode) {
468 case CORTEX_M_ISRMASK_AUTO:
469 /* interrupts taken at resume, whether for step or run -> no mask */
470 return cortex_m_set_maskints(target, false);
472 case CORTEX_M_ISRMASK_OFF:
473 /* interrupts never masked */
474 return cortex_m_set_maskints(target, false);
476 case CORTEX_M_ISRMASK_ON:
477 /* interrupts always masked */
478 return cortex_m_set_maskints(target, true);
480 case CORTEX_M_ISRMASK_STEPONLY:
481 /* interrupts masked for single step only -> mask now if MASKINTS
482 * erratum, otherwise only mask before stepping */
483 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
485 return ERROR_OK;
488 static int cortex_m_set_maskints_for_run(struct target *target)
490 switch (target_to_cm(target)->isrmasking_mode) {
491 case CORTEX_M_ISRMASK_AUTO:
492 /* interrupts taken at resume, whether for step or run -> no mask */
493 return cortex_m_set_maskints(target, false);
495 case CORTEX_M_ISRMASK_OFF:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target, false);
499 case CORTEX_M_ISRMASK_ON:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target, true);
503 case CORTEX_M_ISRMASK_STEPONLY:
504 /* interrupts masked for single step only -> no mask */
505 return cortex_m_set_maskints(target, false);
507 return ERROR_OK;
510 static int cortex_m_set_maskints_for_step(struct target *target)
512 switch (target_to_cm(target)->isrmasking_mode) {
513 case CORTEX_M_ISRMASK_AUTO:
514 /* the auto-interrupt should already be done -> mask */
515 return cortex_m_set_maskints(target, true);
517 case CORTEX_M_ISRMASK_OFF:
518 /* interrupts never masked */
519 return cortex_m_set_maskints(target, false);
521 case CORTEX_M_ISRMASK_ON:
522 /* interrupts always masked */
523 return cortex_m_set_maskints(target, true);
525 case CORTEX_M_ISRMASK_STEPONLY:
526 /* interrupts masked for single step only -> mask */
527 return cortex_m_set_maskints(target, true);
529 return ERROR_OK;
532 static int cortex_m_clear_halt(struct target *target)
534 struct cortex_m_common *cortex_m = target_to_cm(target);
535 struct armv7m_common *armv7m = &cortex_m->armv7m;
536 int retval;
538 /* clear step if any */
539 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
541 /* Read Debug Fault Status Register */
542 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
543 if (retval != ERROR_OK)
544 return retval;
546 /* Clear Debug Fault Status */
547 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
548 if (retval != ERROR_OK)
549 return retval;
550 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
552 return ERROR_OK;
555 static int cortex_m_single_step_core(struct target *target)
557 struct cortex_m_common *cortex_m = target_to_cm(target);
558 int retval;
560 /* Mask interrupts before clearing halt, if not done already. This avoids
561 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
562 * HALT can put the core into an unknown state.
564 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
565 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
566 if (retval != ERROR_OK)
567 return retval;
569 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
570 if (retval != ERROR_OK)
571 return retval;
572 LOG_TARGET_DEBUG(target, "single step");
574 /* restore dhcsr reg */
575 cortex_m_clear_halt(target);
577 return ERROR_OK;
580 static int cortex_m_enable_fpb(struct target *target)
582 int retval = target_write_u32(target, FP_CTRL, 3);
583 if (retval != ERROR_OK)
584 return retval;
586 /* check the fpb is actually enabled */
587 uint32_t fpctrl;
588 retval = target_read_u32(target, FP_CTRL, &fpctrl);
589 if (retval != ERROR_OK)
590 return retval;
592 if (fpctrl & 1)
593 return ERROR_OK;
595 return ERROR_FAIL;
598 static int cortex_m_endreset_event(struct target *target)
600 int retval;
601 uint32_t dcb_demcr;
602 struct cortex_m_common *cortex_m = target_to_cm(target);
603 struct armv7m_common *armv7m = &cortex_m->armv7m;
604 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
605 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
606 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
608 /* REVISIT The four debug monitor bits are currently ignored... */
609 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
610 if (retval != ERROR_OK)
611 return retval;
612 LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
614 /* this register is used for emulated dcc channel */
615 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
616 if (retval != ERROR_OK)
617 return retval;
619 retval = cortex_m_read_dhcsr_atomic_sticky(target);
620 if (retval != ERROR_OK)
621 return retval;
623 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
624 /* Enable debug requests */
625 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
626 if (retval != ERROR_OK)
627 return retval;
630 /* Restore proper interrupt masking setting for running CPU. */
631 cortex_m_set_maskints_for_run(target);
633 /* Enable features controlled by ITM and DWT blocks, and catch only
634 * the vectors we were told to pay attention to.
636 * Target firmware is responsible for all fault handling policy
637 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
638 * or manual updates to the NVIC SHCSR and CCR registers.
640 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
641 if (retval != ERROR_OK)
642 return retval;
644 /* Paranoia: evidently some (early?) chips don't preserve all the
645 * debug state (including FPB, DWT, etc) across reset...
648 /* Enable FPB */
649 retval = cortex_m_enable_fpb(target);
650 if (retval != ERROR_OK) {
651 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
652 return retval;
655 cortex_m->fpb_enabled = true;
657 /* Restore FPB registers */
658 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
659 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
660 if (retval != ERROR_OK)
661 return retval;
664 /* Restore DWT registers */
665 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
666 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
667 dwt_list[i].comp);
668 if (retval != ERROR_OK)
669 return retval;
670 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
671 dwt_list[i].mask);
672 if (retval != ERROR_OK)
673 return retval;
674 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
675 dwt_list[i].function);
676 if (retval != ERROR_OK)
677 return retval;
679 retval = dap_run(swjdp);
680 if (retval != ERROR_OK)
681 return retval;
683 register_cache_invalidate(armv7m->arm.core_cache);
685 /* TODO: invalidate also working areas (needed in the case of detected reset).
686 * Doing so will require flash drivers to test if working area
687 * is still valid in all target algo calling loops.
690 /* make sure we have latest dhcsr flags */
691 retval = cortex_m_read_dhcsr_atomic_sticky(target);
692 if (retval != ERROR_OK)
693 return retval;
695 return retval;
698 static int cortex_m_examine_debug_reason(struct target *target)
700 struct cortex_m_common *cortex_m = target_to_cm(target);
702 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
703 * only check the debug reason if we don't know it already */
705 if ((target->debug_reason != DBG_REASON_DBGRQ)
706 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
707 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
708 target->debug_reason = DBG_REASON_BREAKPOINT;
709 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
710 target->debug_reason = DBG_REASON_WPTANDBKPT;
711 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
712 target->debug_reason = DBG_REASON_WATCHPOINT;
713 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
714 target->debug_reason = DBG_REASON_BREAKPOINT;
715 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
716 target->debug_reason = DBG_REASON_DBGRQ;
717 else /* HALTED */
718 target->debug_reason = DBG_REASON_UNDEFINED;
721 return ERROR_OK;
724 static int cortex_m_examine_exception_reason(struct target *target)
726 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
727 struct armv7m_common *armv7m = target_to_armv7m(target);
728 struct adiv5_dap *swjdp = armv7m->arm.dap;
729 int retval;
731 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
732 if (retval != ERROR_OK)
733 return retval;
734 switch (armv7m->exception_number) {
735 case 2: /* NMI */
736 break;
737 case 3: /* Hard Fault */
738 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
739 if (retval != ERROR_OK)
740 return retval;
741 if (except_sr & 0x40000000) {
742 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
743 if (retval != ERROR_OK)
744 return retval;
746 break;
747 case 4: /* Memory Management */
748 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
749 if (retval != ERROR_OK)
750 return retval;
751 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
752 if (retval != ERROR_OK)
753 return retval;
754 break;
755 case 5: /* Bus Fault */
756 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
757 if (retval != ERROR_OK)
758 return retval;
759 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
760 if (retval != ERROR_OK)
761 return retval;
762 break;
763 case 6: /* Usage Fault */
764 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
765 if (retval != ERROR_OK)
766 return retval;
767 break;
768 case 7: /* Secure Fault */
769 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
770 if (retval != ERROR_OK)
771 return retval;
772 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
773 if (retval != ERROR_OK)
774 return retval;
775 break;
776 case 11: /* SVCall */
777 break;
778 case 12: /* Debug Monitor */
779 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
780 if (retval != ERROR_OK)
781 return retval;
782 break;
783 case 14: /* PendSV */
784 break;
785 case 15: /* SysTick */
786 break;
787 default:
788 except_sr = 0;
789 break;
791 retval = dap_run(swjdp);
792 if (retval == ERROR_OK)
793 LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
794 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
795 armv7m_exception_string(armv7m->exception_number),
796 shcsr, except_sr, cfsr, except_ar);
797 return retval;
800 static int cortex_m_debug_entry(struct target *target)
802 uint32_t xpsr;
803 int retval;
804 struct cortex_m_common *cortex_m = target_to_cm(target);
805 struct armv7m_common *armv7m = &cortex_m->armv7m;
806 struct arm *arm = &armv7m->arm;
807 struct reg *r;
809 LOG_TARGET_DEBUG(target, " ");
811 /* Do this really early to minimize the window where the MASKINTS erratum
812 * can pile up pending interrupts. */
813 cortex_m_set_maskints_for_halt(target);
815 cortex_m_clear_halt(target);
817 retval = cortex_m_read_dhcsr_atomic_sticky(target);
818 if (retval != ERROR_OK)
819 return retval;
821 retval = armv7m->examine_debug_reason(target);
822 if (retval != ERROR_OK)
823 return retval;
825 /* examine PE security state */
826 uint32_t dscsr = 0;
827 if (armv7m->arm.arch == ARM_ARCH_V8M) {
828 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
829 if (retval != ERROR_OK)
830 return retval;
833 /* Load all registers to arm.core_cache */
834 if (!cortex_m->slow_register_read) {
835 retval = cortex_m_fast_read_all_regs(target);
836 if (retval == ERROR_TIMEOUT_REACHED) {
837 cortex_m->slow_register_read = true;
838 LOG_TARGET_DEBUG(target, "Switched to slow register read");
842 if (cortex_m->slow_register_read)
843 retval = cortex_m_slow_read_all_regs(target);
845 if (retval != ERROR_OK)
846 return retval;
848 r = arm->cpsr;
849 xpsr = buf_get_u32(r->value, 0, 32);
851 /* Are we in an exception handler */
852 if (xpsr & 0x1FF) {
853 armv7m->exception_number = (xpsr & 0x1FF);
855 arm->core_mode = ARM_MODE_HANDLER;
856 arm->map = armv7m_msp_reg_map;
857 } else {
858 unsigned control = buf_get_u32(arm->core_cache
859 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
861 /* is this thread privileged? */
862 arm->core_mode = control & 1
863 ? ARM_MODE_USER_THREAD
864 : ARM_MODE_THREAD;
866 /* which stack is it using? */
867 if (control & 2)
868 arm->map = armv7m_psp_reg_map;
869 else
870 arm->map = armv7m_msp_reg_map;
872 armv7m->exception_number = 0;
875 if (armv7m->exception_number)
876 cortex_m_examine_exception_reason(target);
878 bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
879 LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
880 ", cpu in %s state, target->state: %s",
881 arm_mode_name(arm->core_mode),
882 buf_get_u32(arm->pc->value, 0, 32),
883 secure_state ? "Secure" : "Non-Secure",
884 target_state_name(target));
886 if (armv7m->post_debug_entry) {
887 retval = armv7m->post_debug_entry(target);
888 if (retval != ERROR_OK)
889 return retval;
892 return ERROR_OK;
895 static int cortex_m_poll_one(struct target *target)
897 int detected_failure = ERROR_OK;
898 int retval = ERROR_OK;
899 enum target_state prev_target_state = target->state;
900 struct cortex_m_common *cortex_m = target_to_cm(target);
901 struct armv7m_common *armv7m = &cortex_m->armv7m;
903 /* Read from Debug Halting Control and Status Register */
904 retval = cortex_m_read_dhcsr_atomic_sticky(target);
905 if (retval != ERROR_OK) {
906 target->state = TARGET_UNKNOWN;
907 return retval;
910 /* Recover from lockup. See ARMv7-M architecture spec,
911 * section B1.5.15 "Unrecoverable exception cases".
913 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
914 LOG_TARGET_ERROR(target, "clearing lockup after double fault");
915 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
916 target->debug_reason = DBG_REASON_DBGRQ;
918 /* We have to execute the rest (the "finally" equivalent, but
919 * still throw this exception again).
921 detected_failure = ERROR_FAIL;
923 /* refresh status bits */
924 retval = cortex_m_read_dhcsr_atomic_sticky(target);
925 if (retval != ERROR_OK)
926 return retval;
929 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
930 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
931 if (target->state != TARGET_RESET) {
932 target->state = TARGET_RESET;
933 LOG_TARGET_INFO(target, "external reset detected");
935 return ERROR_OK;
938 if (target->state == TARGET_RESET) {
939 /* Cannot switch context while running so endreset is
940 * called with target->state == TARGET_RESET
942 LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
943 cortex_m->dcb_dhcsr);
944 retval = cortex_m_endreset_event(target);
945 if (retval != ERROR_OK) {
946 target->state = TARGET_UNKNOWN;
947 return retval;
949 target->state = TARGET_RUNNING;
950 prev_target_state = TARGET_RUNNING;
953 if (cortex_m->dcb_dhcsr & S_HALT) {
954 target->state = TARGET_HALTED;
956 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
957 retval = cortex_m_debug_entry(target);
959 /* arm_semihosting needs to know registers, don't run if debug entry returned error */
960 if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
961 return retval;
963 if (target->smp) {
964 LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
965 target->smp_halt_event_postponed = true;
966 } else {
967 /* regardless of errors returned in previous code update state */
968 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
971 if (prev_target_state == TARGET_DEBUG_RUNNING) {
972 retval = cortex_m_debug_entry(target);
974 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
976 if (retval != ERROR_OK)
977 return retval;
980 if (target->state == TARGET_UNKNOWN) {
981 /* Check if processor is retiring instructions or sleeping.
982 * Unlike S_RESET_ST here we test if the target *is* running now,
983 * not if it has been running (possibly in the past). Instructions are
984 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
985 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
987 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
988 target->state = TARGET_RUNNING;
989 retval = ERROR_OK;
993 /* Check that target is truly halted, since the target could be resumed externally */
994 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
995 /* registers are now invalid */
996 register_cache_invalidate(armv7m->arm.core_cache);
998 target->state = TARGET_RUNNING;
999 LOG_TARGET_WARNING(target, "external resume detected");
1000 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1001 retval = ERROR_OK;
1004 /* Did we detect a failure condition that we cleared? */
1005 if (detected_failure != ERROR_OK)
1006 retval = detected_failure;
1007 return retval;
1010 static int cortex_m_halt_one(struct target *target);
1012 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
1014 int retval = ERROR_OK;
1015 struct target_list *head;
1017 foreach_smp_target(head, smp_targets) {
1018 struct target *curr = head->target;
1019 if (!target_was_examined(curr))
1020 continue;
1021 if (curr->state == TARGET_HALTED)
1022 continue;
1024 int ret2 = cortex_m_halt_one(curr);
1025 if (retval == ERROR_OK)
1026 retval = ret2; /* store the first error code ignore others */
1028 return retval;
1031 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1033 int retval = ERROR_OK;
1034 struct target_list *head;
1036 foreach_smp_target(head, smp_targets) {
1037 struct target *curr = head->target;
1038 if (!target_was_examined(curr))
1039 continue;
1040 /* skip targets that were already halted */
1041 if (curr->state == TARGET_HALTED)
1042 continue;
1044 int ret2 = cortex_m_poll_one(curr);
1045 if (retval == ERROR_OK)
1046 retval = ret2; /* store the first error code ignore others */
1048 return retval;
/* Called after the last core of an SMP group has been polled: if any core
 * postponed its 'halted' event, halt the whole group, re-poll the cores
 * that were still running, and only then deliver the postponed events so
 * the whole group is already halted when the callbacks run. */
static int cortex_m_poll_smp(struct list_head *smp_targets)
{
	int retval = ERROR_OK;
	struct target_list *head;
	bool halted = false;

	/* one postponed event is enough to trigger the group halt */
	foreach_smp_target(head, smp_targets) {
		struct target *curr = head->target;
		if (curr->smp_halt_event_postponed) {
			halted = true;
			break;
		}
	}

	if (halted) {
		retval = cortex_m_smp_halt_all(smp_targets);

		int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
		if (retval == ERROR_OK)
			retval = ret2;	/* store the first error code ignore others */

		/* deliver the postponed 'halted' events now that the group halt
		 * has been requested and the remaining cores re-polled */
		foreach_smp_target(head, smp_targets) {
			struct target *curr = head->target;
			if (!curr->smp_halt_event_postponed)
				continue;

			curr->smp_halt_event_postponed = false;
			if (curr->state == TARGET_HALTED) {
				LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
				target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
			}
		}
		/* There is no need to set gdb_service->target
		 * as hwthread_update_threads() selects an interesting thread
		 * by its own */
	}
	return retval;
}
1091 static int cortex_m_poll(struct target *target)
1093 int retval = cortex_m_poll_one(target);
1095 if (target->smp) {
1096 struct target_list *last;
1097 last = list_last_entry(target->smp_targets, struct target_list, lh);
1098 if (target == last->target)
1099 /* After the last target in SMP group has been polled
1100 * check for postponed halted events and eventually halt and re-poll
1101 * other targets */
1102 cortex_m_poll_smp(target->smp_targets);
1104 return retval;
/* Halt a single core: set C_HALT in DHCSR, then arrange interrupt masking
 * for the halted state. Returns ERROR_TARGET_NOT_EXAMINED for an unexamined
 * target, and ERROR_OK immediately when the target is already halted. */
static int cortex_m_halt_one(struct target *target)
{
	int retval;
	LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "target non examined yet");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");

	/* Write to Debug Halting Control and Status Register */
	retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return retval;
}
1137 static int cortex_m_halt(struct target *target)
1139 if (target->smp)
1140 return cortex_m_smp_halt_all(target->smp_targets);
1141 else
1142 return cortex_m_halt_one(target);
/* 'soft_reset_halt' handler: core-only reset via AIRCR VECTRESET with
 * vector catch on reset (VC_CORERESET), then wait up to ~100 ms for the
 * halt. NOTE(review): the function returns ERROR_OK even when the halt is
 * never observed within the timeout; only AP access failures propagate. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* poll DHCSR/DFSR until the vector catch fires or ~100 ms elapse;
	 * transient DHCSR read errors are silently retried */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1210 void cortex_m_enable_breakpoints(struct target *target)
1212 struct breakpoint *breakpoint = target->breakpoints;
1214 /* set any pending breakpoints */
1215 while (breakpoint) {
1216 if (!breakpoint->is_set)
1217 cortex_m_set_breakpoint(target, breakpoint);
1218 breakpoint = breakpoint->next;
/* Prepare a halted core for resume without actually restarting it.
 * current=true: resume at the current pc, which is written back to *address;
 * otherwise *address is loaded into pc.
 * debug_execution=true: set PRIMASK (disable interrupts) and force xPSR.T,
 * as needed for algorithm execution (see armv7m_start_algorithm()).
 * handle_breakpoints=true: single-step over a breakpoint at the resume pc.
 * The actual restart is done by cortex_m_restart_one(). */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	if (current)
		*address = resume_pc;

	/* flush dirty cached registers to the core before restarting */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* re-arm the breakpoint even if the step failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
/* Restart a core previously prepared by cortex_m_restore_one(): clear
 * C_HALT, mark the register cache invalid and fire the RESUMED (or
 * DEBUG_RESUMED) event depending on debug_execution. */
static int cortex_m_restart_one(struct target *target, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;
	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	return ERROR_OK;
}
/* Resume all other examined, non-running members of the SMP group.
 * 'address' is purely an output of cortex_m_restore_one() (the resume pc,
 * since current=true) and is used only for the debug message. */
static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
{
	struct target_list *head;
	target_addr_t address;
	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip running targets */
		if (curr->state == TARGET_RUNNING)
			continue;

		int retval = cortex_m_restore_one(curr, true, &address,
			handle_breakpoints, false);
		if (retval != ERROR_OK)
			return retval;

		retval = cortex_m_restart_one(curr, false);
		if (retval != ERROR_OK)
			return retval;

		LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
	}
	return ERROR_OK;
}
1368 static int cortex_m_resume(struct target *target, int current,
1369 target_addr_t address, int handle_breakpoints, int debug_execution)
1371 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1372 if (retval != ERROR_OK) {
1373 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1374 return retval;
1377 if (target->smp && !debug_execution) {
1378 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1379 if (retval != ERROR_OK)
1380 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1383 cortex_m_restart_one(target, !!debug_execution);
1384 if (retval != ERROR_OK) {
1385 LOG_TARGET_ERROR(target, "resume failed");
1386 return retval;
1389 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1390 debug_execution ? "debug " : "", address);
1392 return ERROR_OK;
/* int irqstepcount = 0; */

/* Single-step the core.
 * current non-zero: step from the current pc; otherwise load 'address' into
 * pc first. In CORTEX_M_ISRMASK_AUTO mode pending interrupts are serviced
 * before the actual step (see the long comment inside); in the other
 * masking modes a plain C_STEP is issued with the configured masking.
 * Returns ERROR_OK even when an ISR timeout leaves the core running;
 * target->state is set to TARGET_RUNNING in that case. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the user breakpoint we removed at the step pc, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
/* Assert reset on the target. Uses hardware SRST when available, otherwise
 * a Cortex-M software reset via AIRCR (SYSRESETREQ or VECTRESET per
 * 'cortex_m reset_config'). A TARGET_EVENT_RESET_ASSERT script handler, if
 * registered, replaces the whole sequence. AP access errors are mostly
 * collected rather than propagated so the reset can still proceed. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
			((jtag_reset_config & RESET_SRST_NO_GATING)
			 || (!armv7m->debug_ap && !target->defer_examine))) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
			&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M software reset mechanism.
		 * We default to using VECTRESET.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	return retval;
}
/* Deassert the hardware reset line and, when SRST gates the debug link
 * (no RESET_SRST_NO_GATING) and a debug AP exists, reconnect the DP before
 * the caller resumes polling. */
static int cortex_m_deassert_reset(struct target *target)
{
	struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	/* deassert reset lines */
	adapter_deassert_reset();

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if ((jtag_reset_config & RESET_HAS_SRST) &&
			!(jtag_reset_config & RESET_SRST_NO_GATING) &&
			armv7m->debug_ap) {

		int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			return retval;
		}
	}

	return ERROR_OK;
}
1779 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1781 int retval;
1782 unsigned int fp_num = 0;
1783 struct cortex_m_common *cortex_m = target_to_cm(target);
1784 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1786 if (breakpoint->is_set) {
1787 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1788 return ERROR_OK;
1791 if (breakpoint->type == BKPT_HARD) {
1792 uint32_t fpcr_value;
1793 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1794 fp_num++;
1795 if (fp_num >= cortex_m->fp_num_code) {
1796 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1797 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1799 breakpoint_hw_set(breakpoint, fp_num);
1800 fpcr_value = breakpoint->address | 1;
1801 if (cortex_m->fp_rev == 0) {
1802 if (breakpoint->address > 0x1FFFFFFF) {
1803 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1804 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1805 return ERROR_FAIL;
1807 uint32_t hilo;
1808 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1809 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1810 } else if (cortex_m->fp_rev > 1) {
1811 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1812 return ERROR_FAIL;
1814 comparator_list[fp_num].used = true;
1815 comparator_list[fp_num].fpcr_value = fpcr_value;
1816 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1817 comparator_list[fp_num].fpcr_value);
1818 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1819 fp_num,
1820 comparator_list[fp_num].fpcr_value);
1821 if (!cortex_m->fpb_enabled) {
1822 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1823 retval = cortex_m_enable_fpb(target);
1824 if (retval != ERROR_OK) {
1825 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1826 return retval;
1829 cortex_m->fpb_enabled = true;
1831 } else if (breakpoint->type == BKPT_SOFT) {
1832 uint8_t code[4];
1834 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1835 * semihosting; don't use that. Otherwise the BKPT
1836 * parameter is arbitrary.
1838 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1839 retval = target_read_memory(target,
1840 breakpoint->address & 0xFFFFFFFE,
1841 breakpoint->length, 1,
1842 breakpoint->orig_instr);
1843 if (retval != ERROR_OK)
1844 return retval;
1845 retval = target_write_memory(target,
1846 breakpoint->address & 0xFFFFFFFE,
1847 breakpoint->length, 1,
1848 code);
1849 if (retval != ERROR_OK)
1850 return retval;
1851 breakpoint->is_set = true;
1854 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1855 breakpoint->unique_id,
1856 (int)(breakpoint->type),
1857 breakpoint->address,
1858 breakpoint->length,
1859 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1861 return ERROR_OK;
/* Remove an installed breakpoint: free the FPB comparator (BKPT_HARD) or
 * write back the saved original instruction (BKPT_SOFT). An out-of-range
 * comparator number is logged and ignored (returns ERROR_OK). */
int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

	if (!breakpoint->is_set) {
		LOG_TARGET_WARNING(target, "breakpoint not set");
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		(breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);

	if (breakpoint->type == BKPT_HARD) {
		unsigned int fp_num = breakpoint->number;
		if (fp_num >= cortex_m->fp_num_code) {
			LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
			return ERROR_OK;
		}
		comparator_list[fp_num].used = false;
		comparator_list[fp_num].fpcr_value = 0;
		target_write_u32(target, comparator_list[fp_num].fpcr_address,
			comparator_list[fp_num].fpcr_value);
	} else {
		/* restore original instruction (kept in target endianness) */
		retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1905 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1907 if (breakpoint->length == 3) {
1908 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1909 breakpoint->length = 2;
1912 if ((breakpoint->length != 2)) {
1913 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1914 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1917 return cortex_m_set_breakpoint(target, breakpoint);
1920 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1922 if (!breakpoint->is_set)
1923 return ERROR_OK;
1925 return cortex_m_unset_breakpoint(target, breakpoint);
1928 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1930 unsigned int dwt_num = 0;
1931 struct cortex_m_common *cortex_m = target_to_cm(target);
1933 /* REVISIT Don't fully trust these "not used" records ... users
1934 * may set up breakpoints by hand, e.g. dual-address data value
1935 * watchpoint using comparator #1; comparator #0 matching cycle
1936 * count; send data trace info through ITM and TPIU; etc
1938 struct cortex_m_dwt_comparator *comparator;
1940 for (comparator = cortex_m->dwt_comparator_list;
1941 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1942 comparator++, dwt_num++)
1943 continue;
1944 if (dwt_num >= cortex_m->dwt_num_comp) {
1945 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1946 return ERROR_FAIL;
1948 comparator->used = true;
1949 watchpoint_set(watchpoint, dwt_num);
1951 comparator->comp = watchpoint->address;
1952 target_write_u32(target, comparator->dwt_comparator_address + 0,
1953 comparator->comp);
1955 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_0
1956 && (cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_1) {
1957 uint32_t mask = 0, temp;
1959 /* watchpoint params were validated earlier */
1960 temp = watchpoint->length;
1961 while (temp) {
1962 temp >>= 1;
1963 mask++;
1965 mask--;
1967 comparator->mask = mask;
1968 target_write_u32(target, comparator->dwt_comparator_address + 4,
1969 comparator->mask);
1971 switch (watchpoint->rw) {
1972 case WPT_READ:
1973 comparator->function = 5;
1974 break;
1975 case WPT_WRITE:
1976 comparator->function = 6;
1977 break;
1978 case WPT_ACCESS:
1979 comparator->function = 7;
1980 break;
1982 } else {
1983 uint32_t data_size = watchpoint->length >> 1;
1984 comparator->mask = (watchpoint->length >> 1) | 1;
1986 switch (watchpoint->rw) {
1987 case WPT_ACCESS:
1988 comparator->function = 4;
1989 break;
1990 case WPT_WRITE:
1991 comparator->function = 5;
1992 break;
1993 case WPT_READ:
1994 comparator->function = 6;
1995 break;
1997 comparator->function = comparator->function | (1 << 4) |
1998 (data_size << 10);
2001 target_write_u32(target, comparator->dwt_comparator_address + 8,
2002 comparator->function);
2004 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
2005 watchpoint->unique_id, dwt_num,
2006 (unsigned) comparator->comp,
2007 (unsigned) comparator->mask,
2008 (unsigned) comparator->function);
2009 return ERROR_OK;
/* Disable a DWT watchpoint: clear the comparator FUNCTION register and
 * release the comparator slot. An out-of-range comparator number is logged
 * and ignored (returns ERROR_OK). */
static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_dwt_comparator *comparator;

	if (!watchpoint->is_set) {
		LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
			watchpoint->unique_id);
		return ERROR_OK;
	}

	unsigned int dwt_num = watchpoint->number;

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
		watchpoint->unique_id, dwt_num,
		(unsigned) watchpoint->address);

	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
		return ERROR_OK;
	}

	comparator = cortex_m->dwt_comparator_list + dwt_num;
	comparator->used = false;
	/* FUNCTION = 0 disables the comparator */
	comparator->function = 0;
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	watchpoint->is_set = false;

	return ERROR_OK;
}
/* Validate @watchpoint and reserve a DWT comparator for it.  The actual
 * hardware programming happens later in cortex_m_set_watchpoint(); here we
 * only check feasibility (free comparator, no data-value mask, power-of-two
 * length up to 32K, aligned address) and decrement the free-comparator count. */
int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	if (cortex_m->dwt_comp_available < 1) {
		LOG_TARGET_DEBUG(target, "no comparators?");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* REVISIT This DWT may well be able to watch for specific data
	 * values.  Requires comparator #1 to set DATAVMATCH and match
	 * the data, and another comparator (DATAVADDR0) matching addr.
	 *
	 * NOTE: hardware doesn't support data value masking, so we'll need
	 * to check that mask is zero
	 */
	if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
		LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware allows address masks of up to 32K */
	unsigned mask;

	/* length must be an exact power of two, 1 .. 1<<15 */
	for (mask = 0; mask < 16; mask++) {
		if ((1u << mask) == watchpoint->length)
			break;
	}
	if (mask == 16) {
		LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	/* address must be naturally aligned to the watched length */
	if (watchpoint->address & ((1 << mask) - 1)) {
		LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	cortex_m->dwt_comp_available--;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
/* Remove @watchpoint: disarm it in hardware if needed and return its DWT
 * comparator to the free pool.  Requires a halted target. */
int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->is_set)
		cortex_m_unset_watchpoint(target, watchpoint);

	cortex_m->dwt_comp_available++;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
/* Identify which active watchpoint fired by scanning each armed DWT
 * comparator's FUNCTION register for the sticky MATCHED bit (bit 24).
 * On success *hit_watchpoint points at the matching entry; returns
 * ERROR_FAIL when the debug reason is not a watchpoint or no comparator
 * reports a match. */
static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
{
	if (target->debug_reason != DBG_REASON_WATCHPOINT)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = target_to_cm(target);

	for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
		if (!wp->is_set)
			continue;

		unsigned int dwt_num = wp->number;
		struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;

		uint32_t dwt_function;
		int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
		if (retval != ERROR_OK)
			return ERROR_FAIL;

		/* check the MATCHED bit */
		if (dwt_function & BIT(24)) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}
2136 void cortex_m_enable_watchpoints(struct target *target)
2138 struct watchpoint *watchpoint = target->watchpoints;
2140 /* set any pending watchpoints */
2141 while (watchpoint) {
2142 if (!watchpoint->is_set)
2143 cortex_m_set_watchpoint(target, watchpoint);
2144 watchpoint = watchpoint->next;
2148 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2149 uint32_t size, uint32_t count, uint8_t *buffer)
2151 struct armv7m_common *armv7m = target_to_armv7m(target);
2153 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2154 /* armv6m does not handle unaligned memory access */
2155 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2156 return ERROR_TARGET_UNALIGNED_ACCESS;
2159 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2162 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2163 uint32_t size, uint32_t count, const uint8_t *buffer)
2165 struct armv7m_common *armv7m = target_to_armv7m(target);
2167 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2168 /* armv6m does not handle unaligned memory access */
2169 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2170 return ERROR_TARGET_UNALIGNED_ACCESS;
2173 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
/* Target-type init hook: build the ARMv7-M register cache and set up
 * semihosting support for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
/* Target-type deinit hook: release the debug AP reference (non-HLA only),
 * then free everything cortex_m_examine()/target_create() allocated:
 * FPB comparator list, DWT comparators + register cache, the core
 * register cache, the private config and the arch-info struct itself. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2201 int cortex_m_profiling(struct target *target, uint32_t *samples,
2202 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2204 struct timeval timeout, now;
2205 struct armv7m_common *armv7m = target_to_armv7m(target);
2206 uint32_t reg_value;
2207 int retval;
2209 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2210 if (retval != ERROR_OK) {
2211 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2212 return retval;
2214 if (reg_value == 0) {
2215 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2216 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2219 gettimeofday(&timeout, NULL);
2220 timeval_add_time(&timeout, seconds, 0);
2222 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2224 /* Make sure the target is running */
2225 target_poll(target);
2226 if (target->state == TARGET_HALTED)
2227 retval = target_resume(target, 1, 0, 0, 0);
2229 if (retval != ERROR_OK) {
2230 LOG_TARGET_ERROR(target, "Error while resuming target");
2231 return retval;
2234 uint32_t sample_count = 0;
2236 for (;;) {
2237 if (armv7m && armv7m->debug_ap) {
2238 uint32_t read_count = max_num_samples - sample_count;
2239 if (read_count > 1024)
2240 read_count = 1024;
2242 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2243 (void *)&samples[sample_count],
2244 4, read_count, DWT_PCSR);
2245 sample_count += read_count;
2246 } else {
2247 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2250 if (retval != ERROR_OK) {
2251 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2252 return retval;
2256 gettimeofday(&now, NULL);
2257 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2258 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2259 break;
2263 *num_samples = sample_count;
2264 return retval;
/* REVISIT cache valid/dirty bits are unmaintained.  We could set "valid"
 * on r/w if the core is not running, and clear on resume or reset ... or
 * at least, in a post_restore_context() method.
 */

/* Per-register backing state for a DWT register exposed through the
 * OpenOCD register cache (see cortex_m_dwt_addreg()). */
struct dwt_reg_state {
	struct target *target;	/* target owning this DWT */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
/* reg_arch_type .get handler: read the DWT register from the target and
 * store the 32-bit result into the cached value buffer. */
static int cortex_m_dwt_get_reg(struct reg *reg)
{
	struct dwt_reg_state *state = reg->arch_info;

	uint32_t tmp;
	int retval = target_read_u32(state->target, state->addr, &tmp);
	if (retval != ERROR_OK)
		return retval;

	buf_set_u32(state->value, 0, 32, tmp);
	return ERROR_OK;
}
/* reg_arch_type .set handler: write the low reg->size bits of @buf to the
 * DWT register on the target.  NOTE(review): the cached value buffer is
 * not updated here — only the target is written. */
static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
{
	struct dwt_reg_state *state = reg->arch_info;

	return target_write_u32(state->target, state->addr,
			buf_get_u32(buf, 0, reg->size));
}
/* Descriptor for one memory-mapped DWT register exported to the reg cache. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name shown in the register cache */
	unsigned size;		/* register width in bits */
};

/* Registers present once per DWT block. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};

/* Per-comparator register triple (COMP/MASK/FUNCTION), repeating every
 * 0x10 bytes for up to 16 comparators.  cortex_m_dwt_setup() only
 * registers the first dwt_num_comp * 3 entries. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};

/* Accessors shared by every DWT register in the cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
/* Populate cache entry @r from descriptor @d, allocating its backing
 * dwt_reg_state.  On allocation failure the function returns silently:
 * @r keeps its calloc'd defaults, so r->exist stays false and the
 * register simply never appears in the cache. */
static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
{
	struct dwt_reg_state *state;

	state = calloc(1, sizeof(*state));
	if (!state)
		return;
	state->addr = d->addr;
	state->target = t;

	r->name = d->name;
	r->size = d->size;
	r->value = state->value;
	r->arch_info = state;
	r->type = &dwt_reg_type;
	r->exist = true;
}
/* Probe the DWT unit and build its register cache: read DWT_CTRL to learn
 * the comparator count (bits [31:28]), allocate the comparator bookkeeping
 * list and a reg_cache holding DWT_CTRL/DWT_CYCCNT plus three registers
 * per comparator, and disable every comparator on the target.
 *
 * Error handling uses backward gotos into the failed-allocation branches
 * (fail0/fail1) so each later failure unwinds the earlier allocations.
 * On any failure dwt_num_comp is reset to 0, i.e. "no DWT". */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reading as zero means the DWT is absent */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* NUMCOMP field: number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	/* the two base registers: DWT_CTRL and DWT_CYCCNT */
	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
/* Tear down everything cortex_m_dwt_setup() built: the comparator list and
 * the DWT register cache (unlinking it from the target's cache chain and
 * freeing each register's arch_info state).  Safe to call when no DWT was
 * found (all pointers NULL / count zero). */
static void cortex_m_dwt_free(struct target *target)
{
	struct cortex_m_common *cm = target_to_cm(target);
	struct reg_cache *cache = cm->dwt_cache;

	free(cm->dwt_comparator_list);
	cm->dwt_comparator_list = NULL;
	cm->dwt_num_comp = 0;

	if (cache) {
		register_unlink_cache(&target->reg_cache, cache);

		if (cache->reg_list) {
			for (size_t i = 0; i < cache->num_regs; i++)
				free(cache->reg_list[i].arch_info);
			free(cache->reg_list);
		}
		free(cache);
	}
	cm->dwt_cache = NULL;
}
2456 static bool cortex_m_has_tz(struct target *target)
2458 struct armv7m_common *armv7m = target_to_armv7m(target);
2459 uint32_t dauthstatus;
2461 if (armv7m->arm.arch != ARM_ARCH_V8M)
2462 return false;
2464 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2465 if (retval != ERROR_OK) {
2466 LOG_WARNING("Error reading DAUTHSTATUS register");
2467 return false;
2469 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
/* MVFR0/MVFR1 (Media and VFP Feature Register) addresses and field masks,
 * read by cortex_m_examine() to detect the implemented FPU/MVE variant. */
#define MVFR0 0xE000EF40
#define MVFR0_SP_MASK 0x000000F0	/* single-precision support field */
#define MVFR0_SP 0x00000020
#define MVFR0_DP_MASK 0x00000F00	/* double-precision support field */
#define MVFR0_DP 0x00000200

#define MVFR1 0xE000EF44
#define MVFR1_MVE_MASK 0x00000F00	/* M-profile Vector Extension field */
#define MVFR1_MVE_I 0x00000100		/* integer MVE */
#define MVFR1_MVE_F 0x00000200		/* integer + float MVE */
2484 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2485 struct adiv5_ap **debug_ap)
2487 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2488 return ERROR_OK;
2490 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
/* Examine handler: identify the core and initialize its debug resources.
 *
 * Steps (the identification/setup body runs only on first examine):
 *  1. Non-HLA: find or get the debug MEM-AP and initialize it.
 *  2. Read CPUID, match implementor/part against cortex_m_parts[].
 *  3. Detect the M7 r0p0/r0p1 single-step erratum and FPU/MVE features.
 *  4. Hide FPU / security-extension registers the core doesn't have.
 *  5. Read DHCSR, enable C_DEBUGEN if clear, set DEMCR.TRCENA.
 *  6. Probe the FPB (breakpoints) and DWT (watchpoints), clearing any
 *     comparators left armed on the target.
 */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* M7 r0p0/r0p1: single stepping may enter a pending handler */
		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* Detect the implemented FPU/MVE variant from MVFR0/MVFR1 */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			uint32_t mvfr0;
			target_read_u32(target, MVFR0, &mvfr0);

			if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			uint32_t mvfr0, mvfr1;
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 & MVFR0_DP_MASK) == MVFR0_DP) {
				if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_F) {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP + MVE-F found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_MVE_F;
				} else {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_DP;
				}
			} else if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_I) {
				LOG_TARGET_DEBUG(target, "%s floating point feature MVE-I found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_MVE_I;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* likewise hide the v8-M security-extension registers */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
/* Read one byte of a debug-channel message from DCRDR.  The low byte of
 * the 16-bit read is the control/busy flag, the high byte the data byte.
 * When the busy bit (bit 0) is set, a zero is written back to DCRDR to
 * acknowledge to the target firmware that the byte was consumed. */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
2731 static int cortex_m_target_request_data(struct target *target,
2732 uint32_t size, uint8_t *buffer)
2734 uint8_t data;
2735 uint8_t ctrl;
2736 uint32_t i;
2738 for (i = 0; i < (size * 4); i++) {
2739 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2740 if (retval != ERROR_OK)
2741 return retval;
2742 buffer[i] = data;
2745 return ERROR_OK;
/* Periodic timer callback (registered in cortex_m_init_arch_info()):
 * while the target runs with debug messages enabled, poll the DCC channel
 * and, when a byte is pending, assemble a little-endian 32-bit request
 * from four DCC bytes and dispatch it via target_request(). */
static int cortex_m_handle_target_request(void *priv)
{
	struct target *target = priv;
	if (!target_was_examined(target))
		return ERROR_OK;

	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint8_t data;
		uint8_t ctrl;
		int retval;

		retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;

		/* check if we have data */
		if (ctrl & (1 << 0)) {
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			for (int i = 1; i <= 3; i++) {
				retval = cortex_m_dcc_read(target, &data, &ctrl);
				if (retval != ERROR_OK)
					return retval;
				request |= ((uint32_t)data << (i * 8));
			}
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
/* Initialize the Cortex-M arch-info embedded in @cortex_m: set up the
 * common ARMv7-M state, install the Cortex-M register-access and
 * debug-reason hooks, attach the DAP, and register the periodic DCC
 * polling callback.  Always returns ERROR_OK. */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
/* Target-type create hook: validate the ADIv5 private config supplied by
 * the configuration script, allocate the cortex_m_common arch-info
 * (freed later in cortex_m_deinit_target()) and initialize it. */
static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
{
	struct adiv5_private_config *pc;

	pc = (struct adiv5_private_config *)target->private_config;
	if (adiv5_verify_config(pc) != ERROR_OK)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
	if (!cortex_m) {
		LOG_TARGET_ERROR(target, "No memory creating target");
		return ERROR_FAIL;
	}

	cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
	cortex_m->apsel = pc->ap_num;

	cortex_m_init_arch_info(target, cortex_m, pc->dap);

	return ERROR_OK;
}
2836 /*--------------------------------------------------------------------------*/
/* Guard for command handlers: verify that @cm really is a Cortex-M with
 * direct DAP access (i.e. not an HLA adapter), printing an error to the
 * command invocation otherwise. */
static int cortex_m_verify_pointer(struct command_invocation *cmd,
	struct cortex_m_common *cm)
{
	if (!is_cortex_m_with_dap_access(cm)) {
		command_print(cmd, "target is not a Cortex-M");
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
2849 * Only stuff below this line should need to verify that its target
2850 * is a Cortex-M with available DAP access (not a HLA adapter).
/* 'cortex_m vector_catch' command: with arguments, set which fault/reset
 * vectors trap into debug state by programming the VC_* bits in DEMCR
 * ('all', 'none', or any list of named vectors); without arguments, print
 * the current catch/ignore state of every vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name -> DEMCR vector-catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise: OR together the mask of every named vector */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No Cortex-M vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* print the (re-read) state of every vector-catch bit */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
/* 'cortex_m maskisr' command: set (with an argument) or show the interrupt
 * masking policy used while stepping/halted ('auto', 'off', 'on',
 * 'steponly').  Requires a halted target. */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy to the currently-halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
/* 'cortex_m reset_config' command: select (with an argument) or show the
 * soft-reset method, 'sysresetreq' or 'vectreset'.  Selecting 'vectreset'
 * on an examined core that doesn't support it only warns and keeps the
 * previous setting. */
COMMAND_HANDLER(handle_cortex_m_reset_config_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;
	char *reset_config;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
			cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;

		else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
			if (target_was_examined(target)
					&& !cortex_m->vectreset_supported)
				LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
			else
				cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

		} else
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	switch (cortex_m->soft_reset_config) {
		case CORTEX_M_RESET_SYSRESETREQ:
			reset_config = "sysresetreq";
			break;

		case CORTEX_M_RESET_VECTRESET:
			reset_config = "vectreset";
			break;

		default:
			reset_config = "unknown";
			break;
	}

	command_print(CMD, "cortex_m reset_config %s", reset_config);

	return ERROR_OK;
}
/* Subcommands of the 'cortex_m' command group, plus the shared SMP
 * commands chained in. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for this target type: chains the common
 * ARMv7-M, trace, deprecated-TPIU and RTT command groups, and exposes the
 * 'cortex_m' group defined above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3078 struct target_type cortexm_target = {
3079 .name = "cortex_m",
3081 .poll = cortex_m_poll,
3082 .arch_state = armv7m_arch_state,
3084 .target_request_data = cortex_m_target_request_data,
3086 .halt = cortex_m_halt,
3087 .resume = cortex_m_resume,
3088 .step = cortex_m_step,
3090 .assert_reset = cortex_m_assert_reset,
3091 .deassert_reset = cortex_m_deassert_reset,
3092 .soft_reset_halt = cortex_m_soft_reset_halt,
3094 .get_gdb_arch = arm_get_gdb_arch,
3095 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
3097 .read_memory = cortex_m_read_memory,
3098 .write_memory = cortex_m_write_memory,
3099 .checksum_memory = armv7m_checksum_memory,
3100 .blank_check_memory = armv7m_blank_check_memory,
3102 .run_algorithm = armv7m_run_algorithm,
3103 .start_algorithm = armv7m_start_algorithm,
3104 .wait_algorithm = armv7m_wait_algorithm,
3106 .add_breakpoint = cortex_m_add_breakpoint,
3107 .remove_breakpoint = cortex_m_remove_breakpoint,
3108 .add_watchpoint = cortex_m_add_watchpoint,
3109 .remove_watchpoint = cortex_m_remove_watchpoint,
3110 .hit_watchpoint = cortex_m_hit_watchpoint,
3112 .commands = cortex_m_command_handlers,
3113 .target_create = cortex_m_target_create,
3114 .target_jim_configure = adiv5_jim_configure,
3115 .init_target = cortex_m_init_target,
3116 .examine = cortex_m_examine,
3117 .deinit_target = cortex_m_deinit_target,
3119 .profiling = cortex_m_profiling,