target/cortex_m: suppress historical reset detection
[openocd.git] / src / target / cortex_m.c
blob aeaeb182945dd6992be42552b153c08baf884f45
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
32 #include <rtt/rtt.h>
34 /* NOTE: most of this should work fine for the Cortex-M1 and
35 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
36 * Some differences: M0/M1 doesn't have FPB remapping or the
37 * DWT tracing/profiling support. (So the cycle counter will
38 * not be usable; the other stuff isn't currently used here.)
40 * Although there are some workarounds for errata seen only in r0p0
41 * silicon, such old parts are hard to find and thus no longer
42 * well tested.
45 /* Timeout (in ms) for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
48 /* Supported Cortex-M Cores */
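/* NOTE: the PARTNO field read from the CPUID register is matched against this
 * table (by the examine code later in this file, not shown in this excerpt)
 * to select the architecture and the feature flags of the connected core. */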
49 static const struct cortex_m_part_info cortex_m_parts[] = {
51 .partno = CORTEX_M0_PARTNO,
52 .name = "Cortex-M0",
53 .arch = ARM_ARCH_V6M,
56 .partno = CORTEX_M0P_PARTNO,
57 .name = "Cortex-M0+",
58 .arch = ARM_ARCH_V6M,
61 .partno = CORTEX_M1_PARTNO,
62 .name = "Cortex-M1",
63 .arch = ARM_ARCH_V6M,
66 .partno = CORTEX_M3_PARTNO,
67 .name = "Cortex-M3",
68 .arch = ARM_ARCH_V7M,
69 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
72 .partno = CORTEX_M4_PARTNO,
73 .name = "Cortex-M4",
74 .arch = ARM_ARCH_V7M,
75 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
78 .partno = CORTEX_M7_PARTNO,
79 .name = "Cortex-M7",
80 .arch = ARM_ARCH_V7M,
81 .flags = CORTEX_M_F_HAS_FPV5,
84 .partno = CORTEX_M23_PARTNO,
85 .name = "Cortex-M23",
86 .arch = ARM_ARCH_V8M,
89 .partno = CORTEX_M33_PARTNO,
90 .name = "Cortex-M33",
91 .arch = ARM_ARCH_V8M,
92 .flags = CORTEX_M_F_HAS_FPV5,
95 .partno = CORTEX_M35P_PARTNO,
96 .name = "Cortex-M35P",
97 .arch = ARM_ARCH_V8M,
98 .flags = CORTEX_M_F_HAS_FPV5,
101 .partno = CORTEX_M55_PARTNO,
102 .name = "Cortex-M55",
103 .arch = ARM_ARCH_V8M,
104 .flags = CORTEX_M_F_HAS_FPV5,
108 /* forward declarations */
109 static int cortex_m_store_core_reg_u32(struct target *target,
110 uint32_t num, uint32_t value);
111 static void cortex_m_dwt_free(struct target *target);
113 /** The DCB DHCSR register contains the S_RETIRE_ST and S_RESET_ST bits,
114 * which are cleared on a read. Call this helper each time DHCSR is read
115 * to preserve the S_RESET_ST state in case a reset event was detected.
117 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
118 uint32_t dhcsr)
120 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
123 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
124 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
126 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
128 struct cortex_m_common *cortex_m = target_to_cm(target);
129 struct armv7m_common *armv7m = target_to_armv7m(target);
131 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
132 &cortex_m->dcb_dhcsr);
133 if (retval != ERROR_OK)
134 return retval;
136 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
137 return ERROR_OK;
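/* Read a single core register selected by the DCRSR REGSEL value:
 * request the transfer via DCB_DCRSR, poll DHCSR until S_REGRDY is set
 * (or the timeout expires), then fetch the result from DCB_DCRDR. */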
140 static int cortex_m_load_core_reg_u32(struct target *target,
141 uint32_t regsel, uint32_t *value)
143 struct cortex_m_common *cortex_m = target_to_cm(target);
144 struct armv7m_common *armv7m = target_to_armv7m(target);
145 int retval;
146 uint32_t dcrdr, tmp_value;
147 int64_t then;
149 /* because the DCB_DCRDR is used for the emulated dcc channel
150 * we have to save/restore the DCB_DCRDR when used */
151 if (target->dbg_msg_enabled) {
152 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
153 if (retval != ERROR_OK)
154 return retval;
157 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
158 if (retval != ERROR_OK)
159 return retval;
161 /* check if value from register is ready and pre-read it */
162 then = timeval_ms();
163 while (1) {
164 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
165 &cortex_m->dcb_dhcsr);
166 if (retval != ERROR_OK)
167 return retval;
168 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
169 &tmp_value);
170 if (retval != ERROR_OK)
171 return retval;
172 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
173 if (cortex_m->dcb_dhcsr & S_REGRDY)
174 break;
175 cortex_m->slow_register_read = true; /* Polling (still) needed. */
176 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
177 LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
178 return ERROR_TIMEOUT_REACHED;
180 keep_alive();
183 *value = tmp_value;
185 if (target->dbg_msg_enabled) {
186 /* restore DCB_DCRDR - this needs to be in a separate
187 * transaction otherwise the emulated DCC channel breaks */
188 if (retval == ERROR_OK)
189 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
192 return retval;
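/* Read every existing core register one at a time through
 * cortex_m_load_core_reg_u32(). This is the fallback path used when the
 * batched read below finds S_REGRDY not yet set for some register. */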
195 static int cortex_m_slow_read_all_regs(struct target *target)
197 struct cortex_m_common *cortex_m = target_to_cm(target);
198 struct armv7m_common *armv7m = target_to_armv7m(target);
199 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
201 /* Opportunistically restore fast read, it'll revert to slow
202 * if any register needed polling in cortex_m_load_core_reg_u32(). */
203 cortex_m->slow_register_read = false;
205 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
206 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
207 if (r->exist) {
208 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
209 if (retval != ERROR_OK)
210 return retval;
214 if (!cortex_m->slow_register_read)
215 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
217 return ERROR_OK;
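/* Queue (do not yet execute) one register read on the DAP: a DCRSR write
 * selecting the register, a DHCSR read and a DCRDR read. The queued
 * transfers are executed later by dap_run(); the captured DHCSR value lets
 * the caller check afterwards that S_REGRDY was set for the transfer. */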
220 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
221 uint32_t *reg_value, uint32_t *dhcsr)
223 struct armv7m_common *armv7m = target_to_armv7m(target);
224 int retval;
226 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
227 if (retval != ERROR_OK)
228 return retval;
230 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
231 if (retval != ERROR_OK)
232 return retval;
234 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
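/* Read the whole register set in one queued DAP batch without polling
 * S_REGRDY for each register. If any register turns out not to have been
 * ready, ERROR_TIMEOUT_REACHED is returned so the caller can fall back to
 * the slow, polled read. */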
237 static int cortex_m_fast_read_all_regs(struct target *target)
239 struct cortex_m_common *cortex_m = target_to_cm(target);
240 struct armv7m_common *armv7m = target_to_armv7m(target);
241 int retval;
242 uint32_t dcrdr;
244 /* because the DCB_DCRDR is used for the emulated dcc channel
245 * we have to save/restore the DCB_DCRDR when used */
246 if (target->dbg_msg_enabled) {
247 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
248 if (retval != ERROR_OK)
249 return retval;
252 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
253 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
254 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
255 /* we need one 32-bit word for each register except FP D0..D15, which
256 * need two words */
257 uint32_t r_vals[n_r32];
258 uint32_t dhcsr[n_r32];
260 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
261 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
262 for (reg_id = 0; reg_id < num_regs; reg_id++) {
263 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
264 if (!r->exist)
265 continue; /* skip non existent registers */
267 if (r->size <= 8) {
268 /* Any 8-bit or shorter register is unpacked from a 32-bit
269 * container register. Skip it now. */
270 continue;
273 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
274 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
275 &dhcsr[wi]);
276 if (retval != ERROR_OK)
277 return retval;
278 wi++;
280 assert(r->size == 32 || r->size == 64);
281 if (r->size == 32)
282 continue; /* done with 32-bit register */
284 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
285 /* the odd part of FP register (S1, S3...) */
286 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
287 &dhcsr[wi]);
288 if (retval != ERROR_OK)
289 return retval;
290 wi++;
293 assert(wi <= n_r32);
295 retval = dap_run(armv7m->debug_ap->dap);
296 if (retval != ERROR_OK)
297 return retval;
299 if (target->dbg_msg_enabled) {
300 /* restore DCB_DCRDR - this needs to be in a separate
301 * transaction otherwise the emulated DCC channel breaks */
302 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
303 if (retval != ERROR_OK)
304 return retval;
307 bool not_ready = false;
308 for (unsigned int i = 0; i < wi; i++) {
309 if ((dhcsr[i] & S_REGRDY) == 0) {
310 not_ready = true;
311 LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
313 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
316 if (not_ready) {
317 /* At least one register was not ready;
318 * fall back to the slow read with S_REGRDY polling */
319 return ERROR_TIMEOUT_REACHED;
322 LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);
324 unsigned int ri = 0; /* read index from r_vals array */
325 for (reg_id = 0; reg_id < num_regs; reg_id++) {
326 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
327 if (!r->exist)
328 continue; /* skip non existent registers */
330 r->dirty = false;
332 unsigned int reg32_id;
333 uint32_t offset;
334 if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
335 /* Unpack a partial register from 32-bit container register */
336 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
338 /* The container register ought to precede all regs unpacked
339 * from it in the reg_list. So the value should be ready
340 * to unpack */
341 assert(r32->valid);
342 buf_cpy(r32->value + offset, r->value, r->size);
344 } else {
345 assert(r->size == 32 || r->size == 64);
346 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
348 if (r->size == 64) {
349 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
350 /* the odd part of FP register (S1, S3...) */
351 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
354 r->valid = true;
356 assert(ri == wi);
358 return retval;
361 static int cortex_m_store_core_reg_u32(struct target *target,
362 uint32_t regsel, uint32_t value)
364 struct cortex_m_common *cortex_m = target_to_cm(target);
365 struct armv7m_common *armv7m = target_to_armv7m(target);
366 int retval;
367 uint32_t dcrdr;
368 int64_t then;
370 /* because the DCB_DCRDR is used for the emulated dcc channel
371 * we have to save/restore the DCB_DCRDR when used */
372 if (target->dbg_msg_enabled) {
373 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
374 if (retval != ERROR_OK)
375 return retval;
378 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
379 if (retval != ERROR_OK)
380 return retval;
382 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
383 if (retval != ERROR_OK)
384 return retval;
386 /* check if value is written into register */
387 then = timeval_ms();
388 while (1) {
389 retval = cortex_m_read_dhcsr_atomic_sticky(target);
390 if (retval != ERROR_OK)
391 return retval;
392 if (cortex_m->dcb_dhcsr & S_REGRDY)
393 break;
394 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
395 LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
396 return ERROR_TIMEOUT_REACHED;
398 keep_alive();
401 if (target->dbg_msg_enabled) {
402 /* restore DCB_DCRDR - this needs to be in a separate
403 * transaction otherwise the emulated DCC channel breaks */
404 if (retval == ERROR_OK)
405 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
408 return retval;
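/* Read-modify-write of DCB_DHCSR: the read-only status bits (31..16) are
 * stripped and DBGKEY is inserted, because DHCSR writes are ignored unless
 * the key is present; C_DEBUGEN is always kept set. */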
411 static int cortex_m_write_debug_halt_mask(struct target *target,
412 uint32_t mask_on, uint32_t mask_off)
414 struct cortex_m_common *cortex_m = target_to_cm(target);
415 struct armv7m_common *armv7m = &cortex_m->armv7m;
417 /* mask off status bits */
418 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
419 /* create new register mask */
420 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
422 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
425 static int cortex_m_set_maskints(struct target *target, bool mask)
427 struct cortex_m_common *cortex_m = target_to_cm(target);
428 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
429 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
430 else
431 return ERROR_OK;
434 static int cortex_m_set_maskints_for_halt(struct target *target)
436 struct cortex_m_common *cortex_m = target_to_cm(target);
437 switch (cortex_m->isrmasking_mode) {
438 case CORTEX_M_ISRMASK_AUTO:
439 /* interrupts taken at resume, whether for step or run -> no mask */
440 return cortex_m_set_maskints(target, false);
442 case CORTEX_M_ISRMASK_OFF:
443 /* interrupts never masked */
444 return cortex_m_set_maskints(target, false);
446 case CORTEX_M_ISRMASK_ON:
447 /* interrupts always masked */
448 return cortex_m_set_maskints(target, true);
450 case CORTEX_M_ISRMASK_STEPONLY:
451 /* interrupts masked for single step only -> mask now if MASKINTS
452 * erratum, otherwise only mask before stepping */
453 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
455 return ERROR_OK;
458 static int cortex_m_set_maskints_for_run(struct target *target)
460 switch (target_to_cm(target)->isrmasking_mode) {
461 case CORTEX_M_ISRMASK_AUTO:
462 /* interrupts taken at resume, whether for step or run -> no mask */
463 return cortex_m_set_maskints(target, false);
465 case CORTEX_M_ISRMASK_OFF:
466 /* interrupts never masked */
467 return cortex_m_set_maskints(target, false);
469 case CORTEX_M_ISRMASK_ON:
470 /* interrupts always masked */
471 return cortex_m_set_maskints(target, true);
473 case CORTEX_M_ISRMASK_STEPONLY:
474 /* interrupts masked for single step only -> no mask */
475 return cortex_m_set_maskints(target, false);
477 return ERROR_OK;
480 static int cortex_m_set_maskints_for_step(struct target *target)
482 switch (target_to_cm(target)->isrmasking_mode) {
483 case CORTEX_M_ISRMASK_AUTO:
484 /* the auto-interrupt should already be done -> mask */
485 return cortex_m_set_maskints(target, true);
487 case CORTEX_M_ISRMASK_OFF:
488 /* interrupts never masked */
489 return cortex_m_set_maskints(target, false);
491 case CORTEX_M_ISRMASK_ON:
492 /* interrupts always masked */
493 return cortex_m_set_maskints(target, true);
495 case CORTEX_M_ISRMASK_STEPONLY:
496 /* interrupts masked for single step only -> mask */
497 return cortex_m_set_maskints(target, true);
499 return ERROR_OK;
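/* Clear C_STEP and acknowledge pending debug events: the DFSR status bits
 * are write-one-to-clear, so writing back the value just read clears them. */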
502 static int cortex_m_clear_halt(struct target *target)
504 struct cortex_m_common *cortex_m = target_to_cm(target);
505 struct armv7m_common *armv7m = &cortex_m->armv7m;
506 int retval;
508 /* clear step if any */
509 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
511 /* Read Debug Fault Status Register */
512 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
513 if (retval != ERROR_OK)
514 return retval;
516 /* Clear Debug Fault Status */
517 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
518 if (retval != ERROR_OK)
519 return retval;
520 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
522 return ERROR_OK;
525 static int cortex_m_single_step_core(struct target *target)
527 struct cortex_m_common *cortex_m = target_to_cm(target);
528 int retval;
530 /* Mask interrupts before clearing halt, if not done already. This avoids
531 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
532 * HALT can put the core into an unknown state.
534 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
535 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
536 if (retval != ERROR_OK)
537 return retval;
539 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
540 if (retval != ERROR_OK)
541 return retval;
542 LOG_TARGET_DEBUG(target, "single step");
544 /* restore dhcsr reg */
545 cortex_m_clear_halt(target);
547 return ERROR_OK;
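/* Enable the Flash Patch and Breakpoint unit: FP_CTRL needs the KEY bit
 * written together with ENABLE (hence the value 3), and the register is
 * read back to verify that the unit really switched on. */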
550 static int cortex_m_enable_fpb(struct target *target)
552 int retval = target_write_u32(target, FP_CTRL, 3);
553 if (retval != ERROR_OK)
554 return retval;
556 /* check the fpb is actually enabled */
557 uint32_t fpctrl;
558 retval = target_read_u32(target, FP_CTRL, &fpctrl);
559 if (retval != ERROR_OK)
560 return retval;
562 if (fpctrl & 1)
563 return ERROR_OK;
565 return ERROR_FAIL;
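/* Called when the target leaves reset: re-enable debug requests, restore
 * the DEMCR vector catch configuration and re-program the FPB and DWT
 * comparators, since some devices do not preserve debug unit state across
 * reset. Finally the register cache is invalidated. */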
568 static int cortex_m_endreset_event(struct target *target)
570 int retval;
571 uint32_t dcb_demcr;
572 struct cortex_m_common *cortex_m = target_to_cm(target);
573 struct armv7m_common *armv7m = &cortex_m->armv7m;
574 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
575 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
576 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
578 /* REVISIT The four debug monitor bits are currently ignored... */
579 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
580 if (retval != ERROR_OK)
581 return retval;
582 LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
584 /* this register is used for emulated dcc channel */
585 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
586 if (retval != ERROR_OK)
587 return retval;
589 retval = cortex_m_read_dhcsr_atomic_sticky(target);
590 if (retval != ERROR_OK)
591 return retval;
593 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
594 /* Enable debug requests */
595 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
596 if (retval != ERROR_OK)
597 return retval;
600 /* Restore proper interrupt masking setting for running CPU. */
601 cortex_m_set_maskints_for_run(target);
603 /* Enable features controlled by ITM and DWT blocks, and catch only
604 * the vectors we were told to pay attention to.
606 * Target firmware is responsible for all fault handling policy
607 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
608 * or manual updates to the NVIC SHCSR and CCR registers.
610 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
611 if (retval != ERROR_OK)
612 return retval;
614 /* Paranoia: evidently some (early?) chips don't preserve all the
615 * debug state (including FPB, DWT, etc) across reset...
618 /* Enable FPB */
619 retval = cortex_m_enable_fpb(target);
620 if (retval != ERROR_OK) {
621 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
622 return retval;
625 cortex_m->fpb_enabled = true;
627 /* Restore FPB registers */
628 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
629 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
630 if (retval != ERROR_OK)
631 return retval;
634 /* Restore DWT registers */
635 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
636 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
637 dwt_list[i].comp);
638 if (retval != ERROR_OK)
639 return retval;
640 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
641 dwt_list[i].mask);
642 if (retval != ERROR_OK)
643 return retval;
644 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
645 dwt_list[i].function);
646 if (retval != ERROR_OK)
647 return retval;
649 retval = dap_run(swjdp);
650 if (retval != ERROR_OK)
651 return retval;
653 register_cache_invalidate(armv7m->arm.core_cache);
655 /* TODO: invalidate also working areas (needed in the case of detected reset).
656 * Doing so will require flash drivers to test if working area
657 * is still valid in all target algo calling loops.
660 /* make sure we have latest dhcsr flags */
661 retval = cortex_m_read_dhcsr_atomic_sticky(target);
662 if (retval != ERROR_OK)
663 return retval;
665 return retval;
668 static int cortex_m_examine_debug_reason(struct target *target)
670 struct cortex_m_common *cortex_m = target_to_cm(target);
672 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
673 * only check the debug reason if we don't know it already */
675 if ((target->debug_reason != DBG_REASON_DBGRQ)
676 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
677 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
678 target->debug_reason = DBG_REASON_BREAKPOINT;
679 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
680 target->debug_reason = DBG_REASON_WPTANDBKPT;
681 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
682 target->debug_reason = DBG_REASON_WATCHPOINT;
683 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
686 target->debug_reason = DBG_REASON_DBGRQ;
687 else /* HALTED */
688 target->debug_reason = DBG_REASON_UNDEFINED;
691 return ERROR_OK;
694 static int cortex_m_examine_exception_reason(struct target *target)
696 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
697 struct armv7m_common *armv7m = target_to_armv7m(target);
698 struct adiv5_dap *swjdp = armv7m->arm.dap;
699 int retval;
701 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
702 if (retval != ERROR_OK)
703 return retval;
704 switch (armv7m->exception_number) {
705 case 2: /* NMI */
706 break;
707 case 3: /* Hard Fault */
708 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
709 if (retval != ERROR_OK)
710 return retval;
711 if (except_sr & 0x40000000) {
712 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
713 if (retval != ERROR_OK)
714 return retval;
716 break;
717 case 4: /* Memory Management */
718 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
719 if (retval != ERROR_OK)
720 return retval;
721 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
722 if (retval != ERROR_OK)
723 return retval;
724 break;
725 case 5: /* Bus Fault */
726 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
727 if (retval != ERROR_OK)
728 return retval;
729 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
730 if (retval != ERROR_OK)
731 return retval;
732 break;
733 case 6: /* Usage Fault */
734 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
735 if (retval != ERROR_OK)
736 return retval;
737 break;
738 case 7: /* Secure Fault */
739 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
740 if (retval != ERROR_OK)
741 return retval;
742 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
743 if (retval != ERROR_OK)
744 return retval;
745 break;
746 case 11: /* SVCall */
747 break;
748 case 12: /* Debug Monitor */
749 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
750 if (retval != ERROR_OK)
751 return retval;
752 break;
753 case 14: /* PendSV */
754 break;
755 case 15: /* SysTick */
756 break;
757 default:
758 except_sr = 0;
759 break;
761 retval = dap_run(swjdp);
762 if (retval == ERROR_OK)
763 LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
764 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
765 armv7m_exception_string(armv7m->exception_number),
766 shcsr, except_sr, cfsr, except_ar);
767 return retval;
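/* Entered whenever the core is found halted: clear halt/step flags,
 * determine the debug reason and (on ARMv8-M) the security state, read the
 * complete register set into the cache and report where the core stopped. */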
770 static int cortex_m_debug_entry(struct target *target)
772 uint32_t xPSR;
773 int retval;
774 struct cortex_m_common *cortex_m = target_to_cm(target);
775 struct armv7m_common *armv7m = &cortex_m->armv7m;
776 struct arm *arm = &armv7m->arm;
777 struct reg *r;
779 LOG_TARGET_DEBUG(target, " ");
781 /* Do this really early to minimize the window where the MASKINTS erratum
782 * can pile up pending interrupts. */
783 cortex_m_set_maskints_for_halt(target);
785 cortex_m_clear_halt(target);
787 retval = cortex_m_read_dhcsr_atomic_sticky(target);
788 if (retval != ERROR_OK)
789 return retval;
791 retval = armv7m->examine_debug_reason(target);
792 if (retval != ERROR_OK)
793 return retval;
795 /* examine PE security state */
796 bool secure_state = false;
797 if (armv7m->arm.arch == ARM_ARCH_V8M) {
798 uint32_t dscsr;
800 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
801 if (retval != ERROR_OK)
802 return retval;
804 secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
807 /* Load all registers to arm.core_cache */
808 if (!cortex_m->slow_register_read) {
809 retval = cortex_m_fast_read_all_regs(target);
810 if (retval == ERROR_TIMEOUT_REACHED) {
811 cortex_m->slow_register_read = true;
812 LOG_TARGET_DEBUG(target, "Switched to slow register read");
816 if (cortex_m->slow_register_read)
817 retval = cortex_m_slow_read_all_regs(target);
819 if (retval != ERROR_OK)
820 return retval;
822 r = arm->cpsr;
823 xPSR = buf_get_u32(r->value, 0, 32);
825 /* Are we in an exception handler? */
826 if (xPSR & 0x1FF) {
827 armv7m->exception_number = (xPSR & 0x1FF);
829 arm->core_mode = ARM_MODE_HANDLER;
830 arm->map = armv7m_msp_reg_map;
831 } else {
832 unsigned control = buf_get_u32(arm->core_cache
833 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
835 /* is this thread privileged? */
836 arm->core_mode = control & 1
837 ? ARM_MODE_USER_THREAD
838 : ARM_MODE_THREAD;
840 /* which stack is it using? */
841 if (control & 2)
842 arm->map = armv7m_psp_reg_map;
843 else
844 arm->map = armv7m_msp_reg_map;
846 armv7m->exception_number = 0;
849 if (armv7m->exception_number)
850 cortex_m_examine_exception_reason(target);
852 LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
853 ", cpu in %s state, target->state: %s",
854 arm_mode_name(arm->core_mode),
855 buf_get_u32(arm->pc->value, 0, 32),
856 secure_state ? "Secure" : "Non-Secure",
857 target_state_name(target));
859 if (armv7m->post_debug_entry) {
860 retval = armv7m->post_debug_entry(target);
861 if (retval != ERROR_OK)
862 return retval;
865 return ERROR_OK;
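/* Periodic poll handler: refresh DHCSR, recover from core lockup, detect
 * an external reset via the cumulated S_RESET_ST flag, run the end-of-reset
 * handling, and track halt/resume transitions so target->state and the
 * corresponding event callbacks stay consistent. */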
868 static int cortex_m_poll(struct target *target)
870 int detected_failure = ERROR_OK;
871 int retval = ERROR_OK;
872 enum target_state prev_target_state = target->state;
873 struct cortex_m_common *cortex_m = target_to_cm(target);
874 struct armv7m_common *armv7m = &cortex_m->armv7m;
876 /* Check if debug_ap is available to prevent segmentation fault.
877 * If the re-examination after an error does not find a MEM-AP
878 * (e.g. the target stopped communicating), debug_ap pointer
879 * can suddenly become NULL.
881 if (!armv7m->debug_ap) {
882 target->state = TARGET_UNKNOWN;
883 return ERROR_TARGET_NOT_EXAMINED;
886 /* Read from Debug Halting Control and Status Register */
887 retval = cortex_m_read_dhcsr_atomic_sticky(target);
888 if (retval != ERROR_OK) {
889 target->state = TARGET_UNKNOWN;
890 return retval;
893 /* Recover from lockup. See ARMv7-M architecture spec,
894 * section B1.5.15 "Unrecoverable exception cases".
896 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
897 LOG_TARGET_ERROR(target, "clearing lockup after double fault");
898 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
899 target->debug_reason = DBG_REASON_DBGRQ;
901 /* We have to execute the rest (the "finally" equivalent, but
902 * still throw this exception again).
904 detected_failure = ERROR_FAIL;
906 /* refresh status bits */
907 retval = cortex_m_read_dhcsr_atomic_sticky(target);
908 if (retval != ERROR_OK)
909 return retval;
912 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
913 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
914 if (target->state != TARGET_RESET) {
915 target->state = TARGET_RESET;
916 LOG_TARGET_INFO(target, "external reset detected");
918 return ERROR_OK;
921 if (target->state == TARGET_RESET) {
922 /* Cannot switch context while running so endreset is
923 * called with target->state == TARGET_RESET
925 LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
926 cortex_m->dcb_dhcsr);
927 retval = cortex_m_endreset_event(target);
928 if (retval != ERROR_OK) {
929 target->state = TARGET_UNKNOWN;
930 return retval;
932 target->state = TARGET_RUNNING;
933 prev_target_state = TARGET_RUNNING;
936 if (cortex_m->dcb_dhcsr & S_HALT) {
937 target->state = TARGET_HALTED;
939 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
940 retval = cortex_m_debug_entry(target);
941 if (retval != ERROR_OK)
942 return retval;
944 if (arm_semihosting(target, &retval) != 0)
945 return retval;
947 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
949 if (prev_target_state == TARGET_DEBUG_RUNNING) {
950 retval = cortex_m_debug_entry(target);
951 if (retval != ERROR_OK)
952 return retval;
954 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
958 if (target->state == TARGET_UNKNOWN) {
959 /* Check if processor is retiring instructions or sleeping.
960 * Unlike S_RESET_ST here we test if the target *is* running now,
961 * not if it has been running (possibly in the past). Instructions are
962 * typically retired much faster than OpenOCD polls DHCSR, so S_RETIRE_ST
963 * almost always reads as 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
965 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
966 target->state = TARGET_RUNNING;
967 retval = ERROR_OK;
971 /* Check that target is truly halted, since the target could be resumed externally */
972 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
973 /* registers are now invalid */
974 register_cache_invalidate(armv7m->arm.core_cache);
976 target->state = TARGET_RUNNING;
977 LOG_TARGET_WARNING(target, "external resume detected");
978 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
979 retval = ERROR_OK;
982 /* Did we detect a failure condition that we cleared? */
983 if (detected_failure != ERROR_OK)
984 retval = detected_failure;
985 return retval;
988 static int cortex_m_halt(struct target *target)
990 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
992 if (target->state == TARGET_HALTED) {
993 LOG_TARGET_DEBUG(target, "target was already halted");
994 return ERROR_OK;
997 if (target->state == TARGET_UNKNOWN)
998 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1000 if (target->state == TARGET_RESET) {
1001 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1002 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1003 return ERROR_TARGET_FAILURE;
1004 } else {
1005 /* we came here in a reset_halt or reset_init sequence
1006 * debug entry was already prepared in cortex_m3_assert_reset()
1008 target->debug_reason = DBG_REASON_DBGRQ;
1010 return ERROR_OK;
1014 /* Write to Debug Halting Control and Status Register */
1015 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1017 /* Do this really early to minimize the window where the MASKINTS erratum
1018 * can pile up pending interrupts. */
1019 cortex_m_set_maskints_for_halt(target);
1021 target->debug_reason = DBG_REASON_DBGRQ;
1023 return ERROR_OK;
1026 static int cortex_m_soft_reset_halt(struct target *target)
1028 struct cortex_m_common *cortex_m = target_to_cm(target);
1029 struct armv7m_common *armv7m = &cortex_m->armv7m;
1030 int retval, timeout = 0;
1032 /* On a single Cortex-M MCU soft_reset_halt should be avoided, as the same
1033 * functionality can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1034 * As this reset only uses VC_CORERESET it only ever resets the cortex_m
1035 * core, not the peripherals */
1036 LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");
1038 if (!cortex_m->vectreset_supported) {
1039 LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
1040 return ERROR_FAIL;
1043 /* Set C_DEBUGEN */
1044 retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
1045 if (retval != ERROR_OK)
1046 return retval;
1048 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1049 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
1050 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1051 if (retval != ERROR_OK)
1052 return retval;
1054 /* Request a core-only reset */
1055 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1056 AIRCR_VECTKEY | AIRCR_VECTRESET);
1057 if (retval != ERROR_OK)
1058 return retval;
1059 target->state = TARGET_RESET;
1061 /* registers are now invalid */
1062 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1064 while (timeout < 100) {
1065 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1066 if (retval == ERROR_OK) {
1067 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
1068 &cortex_m->nvic_dfsr);
1069 if (retval != ERROR_OK)
1070 return retval;
1071 if ((cortex_m->dcb_dhcsr & S_HALT)
1072 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
1073 LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
1074 cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
1075 cortex_m_poll(target);
1076 /* FIXME restore user's vector catch config */
1077 return ERROR_OK;
1078 } else {
1079 LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
1080 "DHCSR 0x%08" PRIx32 ", %d ms",
1081 cortex_m->dcb_dhcsr, timeout);
1084 timeout++;
1085 alive_sleep(1);
1088 return ERROR_OK;
1091 void cortex_m_enable_breakpoints(struct target *target)
1093 struct breakpoint *breakpoint = target->breakpoints;
1095 /* set any pending breakpoints */
1096 while (breakpoint) {
1097 if (!breakpoint->is_set)
1098 cortex_m_set_breakpoint(target, breakpoint);
1099 breakpoint = breakpoint->next;
1103 static int cortex_m_resume(struct target *target, int current,
1104 target_addr_t address, int handle_breakpoints, int debug_execution)
1106 struct armv7m_common *armv7m = target_to_armv7m(target);
1107 struct breakpoint *breakpoint = NULL;
1108 uint32_t resume_pc;
1109 struct reg *r;
1111 if (target->state != TARGET_HALTED) {
1112 LOG_TARGET_WARNING(target, "target not halted");
1113 return ERROR_TARGET_NOT_HALTED;
1116 if (!debug_execution) {
1117 target_free_all_working_areas(target);
1118 cortex_m_enable_breakpoints(target);
1119 cortex_m_enable_watchpoints(target);
1122 if (debug_execution) {
1123 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
1125 /* Disable interrupts */
1126 /* We disable interrupts in the PRIMASK register instead of
1127 * masking with C_MASKINTS. This is probably the same issue
1128 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1129 * in parallel with disabled interrupts can cause local faults
1130 * to not be taken.
1132 * This breaks non-debug (application) execution if not
1133 * called from armv7m_start_algorithm() which saves registers.
1135 buf_set_u32(r->value, 0, 1, 1);
1136 r->dirty = true;
1137 r->valid = true;
1139 /* Make sure we are in Thumb mode, set xPSR.T bit */
1140 /* armv7m_start_algorithm() initializes entire xPSR register.
1141 * This duplicity handles the case when cortex_m_resume()
1142 * is used with the debug_execution flag directly,
1143 * not called through armv7m_start_algorithm().
1145 r = armv7m->arm.cpsr;
1146 buf_set_u32(r->value, 24, 1, 1);
1147 r->dirty = true;
1148 r->valid = true;
1151 /* current = 1: continue on current pc, otherwise continue at <address> */
1152 r = armv7m->arm.pc;
1153 if (!current) {
1154 buf_set_u32(r->value, 0, 32, address);
1155 r->dirty = true;
1156 r->valid = true;
1159 /* if we halted last time due to a bkpt instruction
1160 * then we have to manually step over it, otherwise
1161 * the core will break again */
1163 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
1164 && !debug_execution)
1165 armv7m_maybe_skip_bkpt_inst(target, NULL);
1167 resume_pc = buf_get_u32(r->value, 0, 32);
1169 armv7m_restore_context(target);
1171 /* the front-end may request us not to handle breakpoints */
1172 if (handle_breakpoints) {
1173 /* Single step past breakpoint at current address */
1174 breakpoint = breakpoint_find(target, resume_pc);
1175 if (breakpoint) {
1176 LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
1177 breakpoint->address,
1178 breakpoint->unique_id);
1179 cortex_m_unset_breakpoint(target, breakpoint);
1180 cortex_m_single_step_core(target);
1181 cortex_m_set_breakpoint(target, breakpoint);
1185 /* Restart core */
1186 cortex_m_set_maskints_for_run(target);
1187 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1189 target->debug_reason = DBG_REASON_NOTHALTED;
1191 /* registers are now invalid */
1192 register_cache_invalidate(armv7m->arm.core_cache);
1194 if (!debug_execution) {
1195 target->state = TARGET_RUNNING;
1196 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1197 LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
1198 } else {
1199 target->state = TARGET_DEBUG_RUNNING;
1200 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1201 LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
1204 return ERROR_OK;
1207 /* int irqstepcount = 0; */
1208 static int cortex_m_step(struct target *target, int current,
1209 target_addr_t address, int handle_breakpoints)
1211 struct cortex_m_common *cortex_m = target_to_cm(target);
1212 struct armv7m_common *armv7m = &cortex_m->armv7m;
1213 struct breakpoint *breakpoint = NULL;
1214 struct reg *pc = armv7m->arm.pc;
1215 bool bkpt_inst_found = false;
1216 int retval;
1217 bool isr_timed_out = false;
1219 if (target->state != TARGET_HALTED) {
1220 LOG_TARGET_WARNING(target, "target not halted");
1221 return ERROR_TARGET_NOT_HALTED;
1224 /* current = 1: continue on current pc, otherwise continue at <address> */
1225 if (!current) {
1226 buf_set_u32(pc->value, 0, 32, address);
1227 pc->dirty = true;
1228 pc->valid = true;
1231 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
1233 /* the front-end may request us not to handle breakpoints */
1234 if (handle_breakpoints) {
1235 breakpoint = breakpoint_find(target, pc_value);
1236 if (breakpoint)
1237 cortex_m_unset_breakpoint(target, breakpoint);
1240 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
1242 target->debug_reason = DBG_REASON_SINGLESTEP;
1244 armv7m_restore_context(target);
1246 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1248 /* if no bkpt instruction is found at pc then we can perform
1249 * a normal step, otherwise we have to manually step over the bkpt
1250 * instruction - as such simulate a step */
1251 if (bkpt_inst_found == false) {
1252 if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
1253 /* Automatic ISR masking mode off: Just step over the next
1254 * instruction, with interrupts on or off as appropriate. */
1255 cortex_m_set_maskints_for_step(target);
1256 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1257 } else {
1258 /* Process interrupts during stepping in a way they don't interfere
1259 * debugging.
1261 * Principle:
1263 * Set a temporary break point at the current pc and let the core run
1264 * with interrupts enabled. Pending interrupts get served and we run
1265 * into the breakpoint again afterwards. Then we step over the next
1266 * instruction with interrupts disabled.
1268 * If the pending interrupts don't complete within time, we leave the
1269 * core running. This may happen if the interrupts trigger faster
1270 * than the core can process them or the handler doesn't return.
1272 * If no more breakpoints are available we simply do a step with
1273 * interrupts enabled.
1277 /* 2012-09-29 ph
1279 * If a break point is already set on the lower half word then a break point on
1280 * the upper half word will not break again when the core is restarted. So we
1281 * just step over the instruction with interrupts disabled.
1283 * The documentation has no information about this, it was found by observation
1284 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
1285 * suffer from this problem.
1287 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
1288 * address has it always cleared. The former is done to indicate thumb mode
1289 * to gdb.
1292 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
1293 LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
1294 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
1295 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1296 /* Re-enable interrupts if appropriate */
1297 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1298 cortex_m_set_maskints_for_halt(target);
1299 } else {
1301 /* Set a temporary break point */
1302 if (breakpoint) {
1303 retval = cortex_m_set_breakpoint(target, breakpoint);
1304 } else {
1305 enum breakpoint_type type = BKPT_HARD;
1306 if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
1307 /* FPB rev.1 cannot handle such addr, try BKPT instr */
1308 type = BKPT_SOFT;
1310 retval = breakpoint_add(target, pc_value, 2, type);
1313 bool tmp_bp_set = (retval == ERROR_OK);
1315 /* No more breakpoints left, just do a step */
1316 if (!tmp_bp_set) {
1317 cortex_m_set_maskints_for_step(target);
1318 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1319 /* Re-enable interrupts if appropriate */
1320 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1321 cortex_m_set_maskints_for_halt(target);
1322 } else {
1323 /* Start the core */
1324 LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
1325 int64_t t_start = timeval_ms();
1326 cortex_m_set_maskints_for_run(target);
1327 cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
1329 /* Wait for pending handlers to complete or timeout */
1330 do {
1331 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1332 if (retval != ERROR_OK) {
1333 target->state = TARGET_UNKNOWN;
1334 return retval;
1336 isr_timed_out = ((timeval_ms() - t_start) > 500);
1337 } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
1339 /* only remove breakpoint if we created it */
1340 if (breakpoint)
1341 cortex_m_unset_breakpoint(target, breakpoint);
1342 else {
1343 /* Remove the temporary breakpoint */
1344 breakpoint_remove(target, pc_value);
1347 if (isr_timed_out) {
1348 LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
1349 "leaving target running");
1350 } else {
1351 /* Step over next instruction with interrupts disabled */
1352 cortex_m_set_maskints_for_step(target);
1353 cortex_m_write_debug_halt_mask(target,
1354 C_HALT | C_MASKINTS,
1356 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1357 /* Re-enable interrupts if appropriate */
1358 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1359 cortex_m_set_maskints_for_halt(target);
1366 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1367 if (retval != ERROR_OK)
1368 return retval;
1370 /* registers are now invalid */
1371 register_cache_invalidate(armv7m->arm.core_cache);
1373 if (breakpoint)
1374 cortex_m_set_breakpoint(target, breakpoint);
1376 if (isr_timed_out) {
1377 /* Leave the core running. The user has to stop execution manually. */
1378 target->debug_reason = DBG_REASON_NOTHALTED;
1379 target->state = TARGET_RUNNING;
1380 return ERROR_OK;
1383 LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
1384 " nvic_icsr = 0x%" PRIx32,
1385 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1387 retval = cortex_m_debug_entry(target);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1392 LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
1393 " nvic_icsr = 0x%" PRIx32,
1394 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1396 return ERROR_OK;
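/* Assert reset: use hardware SRST when it is available (and not gated),
 * otherwise fall back to a software reset through AIRCR, using either
 * SYSRESETREQ or VECTRESET as configured. When a halt after reset is
 * requested, DEMCR vector catch on core reset is armed beforehand. */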
1399 static int cortex_m_assert_reset(struct target *target)
1401 struct cortex_m_common *cortex_m = target_to_cm(target);
1402 struct armv7m_common *armv7m = &cortex_m->armv7m;
1403 enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
1405 LOG_TARGET_DEBUG(target, "target->state: %s",
1406 target_state_name(target));
1408 enum reset_types jtag_reset_config = jtag_get_reset_config();
1410 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1411 /* allow scripts to override the reset event */
1413 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1414 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1415 target->state = TARGET_RESET;
1417 return ERROR_OK;
1420 /* some cores support connecting while srst is asserted
1421 * use that mode if it has been configured */
1423 bool srst_asserted = false;
1425 if (!target_was_examined(target)) {
1426 if (jtag_reset_config & RESET_HAS_SRST) {
1427 adapter_assert_reset();
1428 if (target->reset_halt)
1429 LOG_TARGET_ERROR(target, "Target not examined, will not halt after reset!");
1430 return ERROR_OK;
1431 } else {
1432 LOG_TARGET_ERROR(target, "Target not examined, reset NOT asserted!");
1433 return ERROR_FAIL;
1437 if ((jtag_reset_config & RESET_HAS_SRST) &&
1438 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1439 adapter_assert_reset();
1440 srst_asserted = true;
1443 /* Enable debug requests */
1444 int retval = cortex_m_read_dhcsr_atomic_sticky(target);
1446 /* Store important errors instead of failing and proceed to reset assert */
1448 if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
1449 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
1451 /* If the processor is sleeping in a WFI or WFE instruction, the
1452 * C_HALT bit must be asserted to regain control */
1453 if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
1454 retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1456 mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
1457 /* Ignore less important errors */
1459 if (!target->reset_halt) {
1460 /* Set/Clear C_MASKINTS in a separate operation */
1461 cortex_m_set_maskints_for_run(target);
1463 /* clear any debug flags before resuming */
1464 cortex_m_clear_halt(target);
1466 /* clear C_HALT in dhcsr reg */
1467 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1468 } else {
1469 /* Halt in debug on reset; endreset_event() restores DEMCR.
1471 * REVISIT catching BUSERR presumably helps to defend against
1472 * bad vector table entries. Should this include MMERR or
1473 * other flags too?
1475 int retval2;
1476 retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
1477 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1478 if (retval != ERROR_OK || retval2 != ERROR_OK)
1479 LOG_TARGET_INFO(target, "AP write error, reset will not halt");
1482 if (jtag_reset_config & RESET_HAS_SRST) {
1483 /* default to asserting srst */
1484 if (!srst_asserted)
1485 adapter_assert_reset();
1487 /* srst is asserted, ignore AP access errors */
1488 retval = ERROR_OK;
1489 } else {
1490 /* Use a standard Cortex-M3 software reset mechanism.
1491 * We default to using VECTRESET as it is supported on all current cores
1492 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
1493 * This has the disadvantage of not resetting the peripherals, so a
1494 * reset-init event handler is needed to perform any peripheral resets.
1496 if (!cortex_m->vectreset_supported
1497 && reset_config == CORTEX_M_RESET_VECTRESET) {
1498 reset_config = CORTEX_M_RESET_SYSRESETREQ;
1499 LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
1500 LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
1503 LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
1504 ? "SYSRESETREQ" : "VECTRESET");
1506 if (reset_config == CORTEX_M_RESET_VECTRESET) {
1507 LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
1508 "handler to reset any peripherals or configure hardware srst support.");
1511 int retval3;
1512 retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1513 AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
1514 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1515 if (retval3 != ERROR_OK)
1516 LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");
1518 retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1519 if (retval3 != ERROR_OK) {
1520 LOG_TARGET_ERROR(target, "DP initialisation failed");
1521 /* The error return value must not be propagated in this case.
1522 * SYSRESETREQ or VECTRESET may have been triggered,
1523 * so reset processing should continue */
1524 } else {
1525 /* I do not know why this is necessary, but it
1526 * fixes strange effects (step/resume cause NMI
1527 * after reset) on LM3S6918 -- Michael Schwingen
1529 uint32_t tmp;
1530 mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
1534 target->state = TARGET_RESET;
1535 jtag_sleep(50000);
1537 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1539 /* now return stored error code if any */
1540 if (retval != ERROR_OK)
1541 return retval;
1543 if (target->reset_halt) {
1544 retval = target_halt(target);
1545 if (retval != ERROR_OK)
1546 return retval;
1549 return ERROR_OK;
1552 static int cortex_m_deassert_reset(struct target *target)
1554 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1556 LOG_TARGET_DEBUG(target, "target->state: %s",
1557 target_state_name(target));
1559 /* deassert reset lines */
1560 adapter_deassert_reset();
1562 enum reset_types jtag_reset_config = jtag_get_reset_config();
1564 if ((jtag_reset_config & RESET_HAS_SRST) &&
1565 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1566 target_was_examined(target)) {
1568 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1569 if (retval != ERROR_OK) {
1570 LOG_TARGET_ERROR(target, "DP initialisation failed");
1571 return retval;
1575 return ERROR_OK;
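/* Hardware breakpoints claim a free FPB comparator (FPB rev.1 can only
 * match code addresses below 0x20000000); software breakpoints patch a
 * BKPT instruction into memory and keep the original halfword so it can be
 * restored when the breakpoint is removed. */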
1578 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1580 int retval;
1581 unsigned int fp_num = 0;
1582 struct cortex_m_common *cortex_m = target_to_cm(target);
1583 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1585 if (breakpoint->is_set) {
1586 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1587 return ERROR_OK;
1590 if (breakpoint->type == BKPT_HARD) {
1591 uint32_t fpcr_value;
1592 while ((fp_num < cortex_m->fp_num_code) && comparator_list[fp_num].used)
1593 fp_num++;
1594 if (fp_num >= cortex_m->fp_num_code) {
1595 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1596 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1598 breakpoint_hw_set(breakpoint, fp_num);
1599 fpcr_value = breakpoint->address | 1;
1600 if (cortex_m->fp_rev == 0) {
1601 if (breakpoint->address > 0x1FFFFFFF) {
1602 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1603 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1604 return ERROR_FAIL;
1606 uint32_t hilo;
1607 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1608 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1609 } else if (cortex_m->fp_rev > 1) {
1610 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1611 return ERROR_FAIL;
1613 comparator_list[fp_num].used = true;
1614 comparator_list[fp_num].fpcr_value = fpcr_value;
1615 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1616 comparator_list[fp_num].fpcr_value);
1617 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1618 fp_num,
1619 comparator_list[fp_num].fpcr_value);
1620 if (!cortex_m->fpb_enabled) {
1621 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1622 retval = cortex_m_enable_fpb(target);
1623 if (retval != ERROR_OK) {
1624 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1625 return retval;
1628 cortex_m->fpb_enabled = true;
1630 } else if (breakpoint->type == BKPT_SOFT) {
1631 uint8_t code[4];
1633 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1634 * semihosting; don't use that. Otherwise the BKPT
1635 * parameter is arbitrary.
1637 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1638 retval = target_read_memory(target,
1639 breakpoint->address & 0xFFFFFFFE,
1640 breakpoint->length, 1,
1641 breakpoint->orig_instr);
1642 if (retval != ERROR_OK)
1643 return retval;
1644 retval = target_write_memory(target,
1645 breakpoint->address & 0xFFFFFFFE,
1646 breakpoint->length, 1,
1647 code);
1648 if (retval != ERROR_OK)
1649 return retval;
1650 breakpoint->is_set = true;
1653 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1654 breakpoint->unique_id,
1655 (int)(breakpoint->type),
1656 breakpoint->address,
1657 breakpoint->length,
1658 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1660 return ERROR_OK;
1663 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1665 int retval;
1666 struct cortex_m_common *cortex_m = target_to_cm(target);
1667 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1669 if (!breakpoint->is_set) {
1670 LOG_TARGET_WARNING(target, "breakpoint not set");
1671 return ERROR_OK;
1674 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1675 breakpoint->unique_id,
1676 (int)(breakpoint->type),
1677 breakpoint->address,
1678 breakpoint->length,
1679 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1681 if (breakpoint->type == BKPT_HARD) {
1682 unsigned int fp_num = breakpoint->number;
1683 if (fp_num >= cortex_m->fp_num_code) {
1684 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1685 return ERROR_OK;
1687 comparator_list[fp_num].used = false;
1688 comparator_list[fp_num].fpcr_value = 0;
1689 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1690 comparator_list[fp_num].fpcr_value);
1691 } else {
1692 /* restore original instruction (kept in target endianness) */
1693 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1694 breakpoint->length, 1,
1695 breakpoint->orig_instr);
1696 if (retval != ERROR_OK)
1697 return retval;
1699 breakpoint->is_set = false;
1701 return ERROR_OK;
1704 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1706 if (breakpoint->length == 3) {
1707 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1708 breakpoint->length = 2;
1711 if ((breakpoint->length != 2)) {
1712 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1713 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1716 return cortex_m_set_breakpoint(target, breakpoint);
1719 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1721 if (!breakpoint->is_set)
1722 return ERROR_OK;
1724 return cortex_m_unset_breakpoint(target, breakpoint);
1727 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1729 unsigned int dwt_num = 0;
1730 struct cortex_m_common *cortex_m = target_to_cm(target);
1732 /* REVISIT Don't fully trust these "not used" records ... users
1733 * may set up breakpoints by hand, e.g. dual-address data value
1734 * watchpoint using comparator #1; comparator #0 matching cycle
1735 * count; send data trace info through ITM and TPIU; etc
1737 struct cortex_m_dwt_comparator *comparator;
1739 for (comparator = cortex_m->dwt_comparator_list;
1740 dwt_num < cortex_m->dwt_num_comp && comparator->used;
1741 comparator++, dwt_num++)
1742 continue;
1743 if (dwt_num >= cortex_m->dwt_num_comp) {
1744 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1745 return ERROR_FAIL;
1747 comparator->used = true;
1748 watchpoint_set(watchpoint, dwt_num);
1750 comparator->comp = watchpoint->address;
1751 target_write_u32(target, comparator->dwt_comparator_address + 0,
1752 comparator->comp);
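/* The legacy (pre-ARMv8-M) DWT encodes the match size as log2(length) in
 * the comparator MASK register, while the ARMv8-M DWT encodes the size and
 * the debug-event action directly in the FUNCTION register. */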
1754 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1755 uint32_t mask = 0, temp;
1757 /* watchpoint params were validated earlier */
1758 temp = watchpoint->length;
1759 while (temp) {
1760 temp >>= 1;
1761 mask++;
1763 mask--;
1765 comparator->mask = mask;
1766 target_write_u32(target, comparator->dwt_comparator_address + 4,
1767 comparator->mask);
1769 switch (watchpoint->rw) {
1770 case WPT_READ:
1771 comparator->function = 5;
1772 break;
1773 case WPT_WRITE:
1774 comparator->function = 6;
1775 break;
1776 case WPT_ACCESS:
1777 comparator->function = 7;
1778 break;
1780 } else {
1781 uint32_t data_size = watchpoint->length >> 1;
1782 comparator->mask = (watchpoint->length >> 1) | 1;
1784 switch (watchpoint->rw) {
1785 case WPT_ACCESS:
1786 comparator->function = 4;
1787 break;
1788 case WPT_WRITE:
1789 comparator->function = 5;
1790 break;
1791 case WPT_READ:
1792 comparator->function = 6;
1793 break;
1795 comparator->function = comparator->function | (1 << 4) |
1796 (data_size << 10);
1799 target_write_u32(target, comparator->dwt_comparator_address + 8,
1800 comparator->function);
1802 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1803 watchpoint->unique_id, dwt_num,
1804 (unsigned) comparator->comp,
1805 (unsigned) comparator->mask,
1806 (unsigned) comparator->function);
1807 return ERROR_OK;
1810 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1812 struct cortex_m_common *cortex_m = target_to_cm(target);
1813 struct cortex_m_dwt_comparator *comparator;
1815 if (!watchpoint->is_set) {
1816 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1817 watchpoint->unique_id);
1818 return ERROR_OK;
1821 unsigned int dwt_num = watchpoint->number;
1823 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1824 watchpoint->unique_id, dwt_num,
1825 (unsigned) watchpoint->address);
1827 if (dwt_num >= cortex_m->dwt_num_comp) {
1828 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1829 return ERROR_OK;
1832 comparator = cortex_m->dwt_comparator_list + dwt_num;
1833 comparator->used = false;
1834 comparator->function = 0;
1835 target_write_u32(target, comparator->dwt_comparator_address + 8,
1836 comparator->function);
1838 watchpoint->is_set = false;
1840 return ERROR_OK;
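/* Validate a watchpoint request and reserve a DWT comparator for it.
 * Only power-of-two lengths up to 32 KiB with matching address
 * alignment are accepted, and data value matching is not supported.
 * The comparator itself is programmed later, when the watchpoint is
 * actually set (see cortex_m_set_watchpoint() and
 * cortex_m_enable_watchpoints()).
 */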
1843 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1845 struct cortex_m_common *cortex_m = target_to_cm(target);
1847 if (cortex_m->dwt_comp_available < 1) {
1848 LOG_TARGET_DEBUG(target, "no comparators?");
1849 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1852 /* hardware doesn't support data value masking */
1853 if (watchpoint->mask != ~(uint32_t)0) {
1854 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1855 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1858 /* hardware allows address masks of up to 32K */
1859 unsigned mask;
1861 for (mask = 0; mask < 16; mask++) {
1862 if ((1u << mask) == watchpoint->length)
1863 break;
1865 if (mask == 16) {
1866 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1867 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1869 if (watchpoint->address & ((1 << mask) - 1)) {
1870 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1871 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1874 /* Caller doesn't seem to be able to describe watching for data
1875 * values of zero; that flags "no value".
1877 * REVISIT This DWT may well be able to watch for specific data
1878 * values. Requires comparator #1 to set DATAVMATCH and match
1879 * the data, and another comparator (DATAVADDR0) matching addr.
1881 if (watchpoint->value) {
1882 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1883 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1886 cortex_m->dwt_comp_available--;
1887 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1889 return ERROR_OK;
1892 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1894 struct cortex_m_common *cortex_m = target_to_cm(target);
1896 /* REVISIT why check? DWT can be updated with core running ... */
1897 if (target->state != TARGET_HALTED) {
1898 LOG_TARGET_WARNING(target, "target not halted");
1899 return ERROR_TARGET_NOT_HALTED;
1902 if (watchpoint->is_set)
1903 cortex_m_unset_watchpoint(target, watchpoint);
1905 cortex_m->dwt_comp_available++;
1906 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1908 return ERROR_OK;
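/* Find out which watchpoint fired after a DBG_REASON_WATCHPOINT halt:
 * read back each set watchpoint's DWT FUNCTION register and report the
 * one whose MATCHED bit (bit 24, clear-on-read) is set.
 */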
1911 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1913 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1914 return ERROR_FAIL;
1916 struct cortex_m_common *cortex_m = target_to_cm(target);
1918 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1919 if (!wp->is_set)
1920 continue;
1922 unsigned int dwt_num = wp->number;
1923 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1925 uint32_t dwt_function;
1926 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1927 if (retval != ERROR_OK)
1928 return ERROR_FAIL;
1930 /* check the MATCHED bit */
1931 if (dwt_function & BIT(24)) {
1932 *hit_watchpoint = wp;
1933 return ERROR_OK;
1937 return ERROR_FAIL;
1940 void cortex_m_enable_watchpoints(struct target *target)
1942 struct watchpoint *watchpoint = target->watchpoints;
1944 /* set any pending watchpoints */
1945 while (watchpoint) {
1946 if (!watchpoint->is_set)
1947 cortex_m_set_watchpoint(target, watchpoint);
1948 watchpoint = watchpoint->next;
1952 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1953 uint32_t size, uint32_t count, uint8_t *buffer)
1955 struct armv7m_common *armv7m = target_to_armv7m(target);
1957 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1958 /* armv6m does not handle unaligned memory access */
1959 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1960 return ERROR_TARGET_UNALIGNED_ACCESS;
1963 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1966 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1967 uint32_t size, uint32_t count, const uint8_t *buffer)
1969 struct armv7m_common *armv7m = target_to_armv7m(target);
1971 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1972 /* armv6m does not handle unaligned memory access */
1973 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1974 return ERROR_TARGET_UNALIGNED_ACCESS;
1977 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1980 static int cortex_m_init_target(struct command_context *cmd_ctx,
1981 struct target *target)
1983 armv7m_build_reg_cache(target);
1984 arm_semihosting_init(target);
1985 return ERROR_OK;
1988 void cortex_m_deinit_target(struct target *target)
1990 struct cortex_m_common *cortex_m = target_to_cm(target);
1991 struct armv7m_common *armv7m = target_to_armv7m(target);
1993 if (!armv7m->is_hla_target && armv7m->debug_ap)
1994 dap_put_ap(armv7m->debug_ap);
1996 free(cortex_m->fp_comparator_list);
1998 cortex_m_dwt_free(target);
1999 armv7m_free_reg_cache(target);
2001 free(target->private_config);
2002 free(cortex_m);
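/* Statistical profiling via the DWT Program Counter Sample Register.
 * DWT_PCSR is sampled in a tight loop while the core is running,
 * preferably as non-incrementing block reads through the MEM-AP, and
 * the raw PC samples are returned to the caller. If PCSR reads as
 * zero, the feature is not implemented and the generic
 * target_profiling_default() fallback is used instead.
 */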
2005 int cortex_m_profiling(struct target *target, uint32_t *samples,
2006 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2008 struct timeval timeout, now;
2009 struct armv7m_common *armv7m = target_to_armv7m(target);
2010 uint32_t reg_value;
2011 int retval;
2013 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2014 if (retval != ERROR_OK) {
2015 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2016 return retval;
2018 if (reg_value == 0) {
2019 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2020 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2023 gettimeofday(&timeout, NULL);
2024 timeval_add_time(&timeout, seconds, 0);
2026 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2028 /* Make sure the target is running */
2029 target_poll(target);
2030 if (target->state == TARGET_HALTED)
2031 retval = target_resume(target, 1, 0, 0, 0);
2033 if (retval != ERROR_OK) {
2034 LOG_TARGET_ERROR(target, "Error while resuming target");
2035 return retval;
2038 uint32_t sample_count = 0;
2040 for (;;) {
2041 if (armv7m && armv7m->debug_ap) {
2042 uint32_t read_count = max_num_samples - sample_count;
2043 if (read_count > 1024)
2044 read_count = 1024;
2046 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2047 (void *)&samples[sample_count],
2048 4, read_count, DWT_PCSR);
2049 sample_count += read_count;
2050 } else {
2051 retval = target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2054 if (retval != ERROR_OK) {
2055 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2056 return retval;
2060 gettimeofday(&now, NULL);
2061 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2062 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2063 break;
2067 *num_samples = sample_count;
2068 return retval;
2072 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2073 * on r/w if the core is not running, and clear on resume or reset ... or
2074 * at least, in a post_restore_context() method.
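/* The DWT registers are also exported through a dedicated register
 * cache ("Cortex-M DWT registers"). Each cache entry keeps the
 * register address plus a small scratch buffer, and the get/set
 * handlers below simply forward to target_read_u32() and
 * target_write_u32().
 */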
2077 struct dwt_reg_state {
2078 struct target *target;
2079 uint32_t addr;
2080 uint8_t value[4]; /* scratch/cache */
2083 static int cortex_m_dwt_get_reg(struct reg *reg)
2085 struct dwt_reg_state *state = reg->arch_info;
2087 uint32_t tmp;
2088 int retval = target_read_u32(state->target, state->addr, &tmp);
2089 if (retval != ERROR_OK)
2090 return retval;
2092 buf_set_u32(state->value, 0, 32, tmp);
2093 return ERROR_OK;
2096 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2098 struct dwt_reg_state *state = reg->arch_info;
2100 return target_write_u32(state->target, state->addr,
2101 buf_get_u32(buf, 0, reg->size));
2104 struct dwt_reg {
2105 uint32_t addr;
2106 const char *name;
2107 unsigned size;
2110 static const struct dwt_reg dwt_base_regs[] = {
2111 { DWT_CTRL, "dwt_ctrl", 32, },
2112 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
2113 * increments while the core is asleep.
2115 { DWT_CYCCNT, "dwt_cyccnt", 32, },
2116 /* plus some 8 bit counters, useful for profiling with TPIU */
2119 static const struct dwt_reg dwt_comp[] = {
2120 #define DWT_COMPARATOR(i) \
2121 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
2122 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
2123 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
2124 DWT_COMPARATOR(0),
2125 DWT_COMPARATOR(1),
2126 DWT_COMPARATOR(2),
2127 DWT_COMPARATOR(3),
2128 DWT_COMPARATOR(4),
2129 DWT_COMPARATOR(5),
2130 DWT_COMPARATOR(6),
2131 DWT_COMPARATOR(7),
2132 DWT_COMPARATOR(8),
2133 DWT_COMPARATOR(9),
2134 DWT_COMPARATOR(10),
2135 DWT_COMPARATOR(11),
2136 DWT_COMPARATOR(12),
2137 DWT_COMPARATOR(13),
2138 DWT_COMPARATOR(14),
2139 DWT_COMPARATOR(15),
2140 #undef DWT_COMPARATOR
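/* Descriptions for up to 16 comparators are listed above; how many are
 * actually implemented is read from DWT_CTRL[31:28] (NUMCOMP) in
 * cortex_m_dwt_setup() below.
 */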
2143 static const struct reg_arch_type dwt_reg_type = {
2144 .get = cortex_m_dwt_get_reg,
2145 .set = cortex_m_dwt_set_reg,
2148 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2150 struct dwt_reg_state *state;
2152 state = calloc(1, sizeof(*state));
2153 if (!state)
2154 return;
2155 state->addr = d->addr;
2156 state->target = t;
2158 r->name = d->name;
2159 r->size = d->size;
2160 r->value = state->value;
2161 r->arch_info = state;
2162 r->type = &dwt_reg_type;
2165 static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
2167 uint32_t dwtcr;
2168 struct reg_cache *cache;
2169 struct cortex_m_dwt_comparator *comparator;
2170 int reg;
2172 target_read_u32(target, DWT_CTRL, &dwtcr);
2173 LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
2174 if (!dwtcr) {
2175 LOG_TARGET_DEBUG(target, "no DWT");
2176 return;
2179 target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
2180 LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
2182 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
2183 cm->dwt_comp_available = cm->dwt_num_comp;
2184 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
2185 sizeof(struct cortex_m_dwt_comparator));
2186 if (!cm->dwt_comparator_list) {
2187 fail0:
2188 cm->dwt_num_comp = 0;
2189 LOG_TARGET_ERROR(target, "out of mem");
2190 return;
2193 cache = calloc(1, sizeof(*cache));
2194 if (!cache) {
2195 fail1:
2196 free(cm->dwt_comparator_list);
2197 goto fail0;
2199 cache->name = "Cortex-M DWT registers";
2200 cache->num_regs = 2 + cm->dwt_num_comp * 3;
2201 cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
2202 if (!cache->reg_list) {
2203 free(cache);
2204 goto fail1;
2207 for (reg = 0; reg < 2; reg++)
2208 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2209 dwt_base_regs + reg);
2211 comparator = cm->dwt_comparator_list;
2212 for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
2213 int j;
2215 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
2216 for (j = 0; j < 3; j++, reg++)
2217 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2218 dwt_comp + 3 * i + j);
2220 /* make sure we clear any watchpoints enabled on the target */
2221 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
2224 *register_get_last_cache_p(&target->reg_cache) = cache;
2225 cm->dwt_cache = cache;
2227 LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
2228 dwtcr, cm->dwt_num_comp,
2229 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
2231 /* REVISIT: if num_comp > 1, check whether comparator #1 can
2232 * implement single-address data value watchpoints ... so we
2233 * won't need to check it later, when asked to set one up.
2237 static void cortex_m_dwt_free(struct target *target)
2239 struct cortex_m_common *cm = target_to_cm(target);
2240 struct reg_cache *cache = cm->dwt_cache;
2242 free(cm->dwt_comparator_list);
2243 cm->dwt_comparator_list = NULL;
2244 cm->dwt_num_comp = 0;
2246 if (cache) {
2247 register_unlink_cache(&target->reg_cache, cache);
2249 if (cache->reg_list) {
2250 for (size_t i = 0; i < cache->num_regs; i++)
2251 free(cache->reg_list[i].arch_info);
2252 free(cache->reg_list);
2254 free(cache);
2256 cm->dwt_cache = NULL;
2259 #define MVFR0 0xe000ef40
2260 #define MVFR1 0xe000ef44
2262 #define MVFR0_DEFAULT_M4 0x10110021
2263 #define MVFR1_DEFAULT_M4 0x11000011
2265 #define MVFR0_DEFAULT_M7_SP 0x10110021
2266 #define MVFR0_DEFAULT_M7_DP 0x10110221
2267 #define MVFR1_DEFAULT_M7_SP 0x11000011
2268 #define MVFR1_DEFAULT_M7_DP 0x12000011
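/* MVFR0/MVFR1 are the Media and VFP Feature Registers. The reset
 * values above are matched against the probed registers below to
 * distinguish FPv4-SP (Cortex-M4 class) from FPv5 single- and
 * double-precision (Cortex-M7 class) floating point units.
 */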
2270 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2271 struct adiv5_ap **debug_ap)
2273 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2274 return ERROR_OK;
2276 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
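/* Probe and initialize a Cortex-M target: select (or look up) the
 * debug MEM-AP, identify the core from CPUID, detect an optional FPU,
 * enable debug via DHCSR and TRCENA in DEMCR, and enumerate the FPB
 * breakpoint and DWT watchpoint resources.
 */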
2279 int cortex_m_examine(struct target *target)
2281 int retval;
2282 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2283 struct cortex_m_common *cortex_m = target_to_cm(target);
2284 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2285 struct armv7m_common *armv7m = target_to_armv7m(target);
2287 /* hla_target shares the examine handler but does not support
2288 * all its calls */
2289 if (!armv7m->is_hla_target) {
2290 if (armv7m->debug_ap) {
2291 dap_put_ap(armv7m->debug_ap);
2292 armv7m->debug_ap = NULL;
2295 if (cortex_m->apsel == DP_APSEL_INVALID) {
2296 /* Search for the MEM-AP */
2297 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2298 if (retval != ERROR_OK) {
2299 LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
2300 return retval;
2302 } else {
2303 armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
2304 if (!armv7m->debug_ap) {
2305 LOG_ERROR("Cannot get AP");
2306 return ERROR_FAIL;
2310 armv7m->debug_ap->memaccess_tck = 8;
2312 retval = mem_ap_init(armv7m->debug_ap);
2313 if (retval != ERROR_OK)
2314 return retval;
2317 if (!target_was_examined(target)) {
2318 target_set_examined(target);
2320 /* Read from Device Identification Registers */
2321 retval = target_read_u32(target, CPUID, &cpuid);
2322 if (retval != ERROR_OK)
2323 return retval;
2325 /* Get ARCH and CPU types */
2326 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2328 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2329 if (core_partno == cortex_m_parts[n].partno) {
2330 cortex_m->core_info = &cortex_m_parts[n];
2331 break;
2335 if (!cortex_m->core_info) {
2336 LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2337 return ERROR_FAIL;
2340 armv7m->arm.arch = cortex_m->core_info->arch;
2342 LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
2343 cortex_m->core_info->name,
2344 (uint8_t)((cpuid >> 20) & 0xf),
2345 (uint8_t)((cpuid >> 0) & 0xf));
2347 cortex_m->maskints_erratum = false;
2348 if (core_partno == CORTEX_M7_PARTNO) {
2349 uint8_t rev, patch;
2350 rev = (cpuid >> 20) & 0xf;
2351 patch = (cpuid >> 0) & 0xf;
2352 if ((rev == 0) && (patch < 2)) {
2353 LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
2354 cortex_m->maskints_erratum = true;
2357 LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);
2359 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2360 target_read_u32(target, MVFR0, &mvfr0);
2361 target_read_u32(target, MVFR1, &mvfr1);
2363 /* test for floating point feature on Cortex-M4 */
2364 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2365 LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2366 armv7m->fp_feature = FPV4_SP;
2368 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2369 target_read_u32(target, MVFR0, &mvfr0);
2370 target_read_u32(target, MVFR1, &mvfr1);
2372 /* test for floating point features on Cortex-M7 */
2373 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2374 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2375 armv7m->fp_feature = FPV5_SP;
2376 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2377 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2378 armv7m->fp_feature = FPV5_DP;
2382 /* VECTRESET is supported only on ARMv7-M cores */
2383 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2385 /* Check for FPU; otherwise mark the FPU registers as non-existent */
2386 if (armv7m->fp_feature == FP_NONE)
2387 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2388 armv7m->arm.core_cache->reg_list[idx].exist = false;
2390 if (armv7m->arm.arch != ARM_ARCH_V8M)
2391 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2392 armv7m->arm.core_cache->reg_list[idx].exist = false;
2394 if (!armv7m->is_hla_target) {
2395 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2396 /* Cortex-M3/M4 have a 4096-byte TAR auto-increment range,
2397 * see ARM IHI 0031C, MEM-AP section 7.2.2 */
2398 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2401 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2402 if (retval != ERROR_OK)
2403 return retval;
2405 /* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
2406 * as S_RESET_ST may indicate a reset that happened a long time ago
2407 * (most probably the power-on reset before OpenOCD was started).
2408 * As we are just initializing the debug system we do not need
2409 * to call cortex_m_endreset_event() in the following poll.
2411 if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
2412 cortex_m->dcb_dhcsr_sticky_is_recent = true;
2413 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
2414 LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
2415 cortex_m->dcb_dhcsr &= ~S_RESET_ST;
2418 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2420 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2421 /* Enable debug requests */
2422 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2424 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2425 if (retval != ERROR_OK)
2426 return retval;
2427 cortex_m->dcb_dhcsr = dhcsr;
2430 /* Configure trace modules */
2431 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2432 if (retval != ERROR_OK)
2433 return retval;
2435 if (armv7m->trace_config.itm_deferred_config)
2436 armv7m_trace_itm_config(target);
2438 /* NOTE: FPB and DWT are both optional. */
2440 /* Setup FPB */
2441 target_read_u32(target, FP_CTRL, &fpcr);
2442 /* bits [14:12] and [7:4] */
2443 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2444 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2445 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2446 Revision is zero-based; fp_rev == 1 means Rev. 2! */
2447 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2448 free(cortex_m->fp_comparator_list);
2449 cortex_m->fp_comparator_list = calloc(
2450 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2451 sizeof(struct cortex_m_fp_comparator));
2452 cortex_m->fpb_enabled = fpcr & 1;
2453 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2454 cortex_m->fp_comparator_list[i].type =
2455 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2456 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2458 /* make sure we clear any breakpoints enabled on the target */
2459 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2461 LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2462 fpcr,
2463 cortex_m->fp_num_code,
2464 cortex_m->fp_num_lit);
2466 /* Setup DWT */
2467 cortex_m_dwt_free(target);
2468 cortex_m_dwt_setup(cortex_m, target);
2470 /* These hardware breakpoints only work for code in flash! */
2471 LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
2472 cortex_m->fp_num_code,
2473 cortex_m->dwt_num_comp);
2476 return ERROR_OK;
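/* Read one byte from the target's debug communication channel.
 * The low halfword of DCB_DCRDR carries a control byte (bit 0 set
 * means "data valid") and a data byte; after a valid read the
 * halfword is written back as zero to acknowledge the transfer.
 */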
2479 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2481 struct armv7m_common *armv7m = target_to_armv7m(target);
2482 uint16_t dcrdr;
2483 uint8_t buf[2];
2484 int retval;
2486 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2487 if (retval != ERROR_OK)
2488 return retval;
2490 dcrdr = target_buffer_get_u16(target, buf);
2491 *ctrl = (uint8_t)dcrdr;
2492 *value = (uint8_t)(dcrdr >> 8);
2494 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2496 /* write ack back to software dcc register
2497 * signify we have read data */
2498 if (dcrdr & (1 << 0)) {
2499 target_buffer_set_u16(target, buf, 0);
2500 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2501 if (retval != ERROR_OK)
2502 return retval;
2505 return ERROR_OK;
2508 static int cortex_m_target_request_data(struct target *target,
2509 uint32_t size, uint8_t *buffer)
2511 uint8_t data;
2512 uint8_t ctrl;
2513 uint32_t i;
2515 for (i = 0; i < (size * 4); i++) {
2516 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2517 if (retval != ERROR_OK)
2518 return retval;
2519 buffer[i] = data;
2522 return ERROR_OK;
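/* Periodic timer callback: while the target is running, poll the DCC
 * for pending debug messages and reassemble four consecutive bytes
 * into a 32-bit request passed to target_request().
 */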
2525 static int cortex_m_handle_target_request(void *priv)
2527 struct target *target = priv;
2528 if (!target_was_examined(target))
2529 return ERROR_OK;
2531 if (!target->dbg_msg_enabled)
2532 return ERROR_OK;
2534 if (target->state == TARGET_RUNNING) {
2535 uint8_t data;
2536 uint8_t ctrl;
2537 int retval;
2539 retval = cortex_m_dcc_read(target, &data, &ctrl);
2540 if (retval != ERROR_OK)
2541 return retval;
2543 /* check if we have data */
2544 if (ctrl & (1 << 0)) {
2545 uint32_t request;
2547 /* we assume target is quick enough */
2548 request = data;
2549 for (int i = 1; i <= 3; i++) {
2550 retval = cortex_m_dcc_read(target, &data, &ctrl);
2551 if (retval != ERROR_OK)
2552 return retval;
2553 request |= ((uint32_t)data << (i * 8));
2555 target_request(target, request);
2559 return ERROR_OK;
2562 static int cortex_m_init_arch_info(struct target *target,
2563 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2565 struct armv7m_common *armv7m = &cortex_m->armv7m;
2567 armv7m_init_arch_info(target, armv7m);
2569 /* default reset mode is to use srst if fitted;
2570 * if not, it will use CORTEX_M_RESET_VECTRESET */
2571 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2573 armv7m->arm.dap = dap;
2575 /* register arch-specific functions */
2576 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2578 armv7m->post_debug_entry = NULL;
2580 armv7m->pre_restore_context = NULL;
2582 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2583 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2585 target_register_timer_callback(cortex_m_handle_target_request, 1,
2586 TARGET_TIMER_TYPE_PERIODIC, target);
2588 return ERROR_OK;
2591 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2593 struct adiv5_private_config *pc;
2595 pc = (struct adiv5_private_config *)target->private_config;
2596 if (adiv5_verify_config(pc) != ERROR_OK)
2597 return ERROR_FAIL;
2599 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2600 if (!cortex_m) {
2601 LOG_TARGET_ERROR(target, "No memory creating target");
2602 return ERROR_FAIL;
2605 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2606 cortex_m->apsel = pc->ap_num;
2608 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2610 return ERROR_OK;
2613 /*--------------------------------------------------------------------------*/
2615 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2616 struct cortex_m_common *cm)
2618 if (!is_cortex_m_with_dap_access(cm)) {
2619 command_print(cmd, "target is not a Cortex-M");
2620 return ERROR_TARGET_INVALID;
2622 return ERROR_OK;
2626 * Only stuff below this line should need to verify that its target
2627 * is a Cortex-M. Everything else should have indirected through the
2628 * cortexm_target structure, which is only used with Cortex-M targets.
2631 COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
2633 struct target *target = get_current_target(CMD_CTX);
2634 struct cortex_m_common *cortex_m = target_to_cm(target);
2635 struct armv7m_common *armv7m = &cortex_m->armv7m;
2636 uint32_t demcr = 0;
2637 int retval;
2639 static const struct {
2640 char name[10];
2641 unsigned mask;
2642 } vec_ids[] = {
2643 { "hard_err", VC_HARDERR, },
2644 { "int_err", VC_INTERR, },
2645 { "bus_err", VC_BUSERR, },
2646 { "state_err", VC_STATERR, },
2647 { "chk_err", VC_CHKERR, },
2648 { "nocp_err", VC_NOCPERR, },
2649 { "mm_err", VC_MMERR, },
2650 { "reset", VC_CORERESET, },
2653 retval = cortex_m_verify_pointer(CMD, cortex_m);
2654 if (retval != ERROR_OK)
2655 return retval;
2657 if (!target_was_examined(target)) {
2658 LOG_TARGET_ERROR(target, "Target not examined yet");
2659 return ERROR_FAIL;
2662 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2663 if (retval != ERROR_OK)
2664 return retval;
2666 if (CMD_ARGC > 0) {
2667 unsigned catch = 0;
2669 if (CMD_ARGC == 1) {
2670 if (strcmp(CMD_ARGV[0], "all") == 0) {
2671 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2672 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2673 | VC_MMERR | VC_CORERESET;
2674 goto write;
2675 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2676 goto write;
2678 while (CMD_ARGC-- > 0) {
2679 unsigned i;
2680 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2681 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2682 continue;
2683 catch |= vec_ids[i].mask;
2684 break;
2686 if (i == ARRAY_SIZE(vec_ids)) {
2687 LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2688 return ERROR_COMMAND_SYNTAX_ERROR;
2691 write:
2692 /* For now, armv7m->demcr only stores vector catch flags. */
2693 armv7m->demcr = catch;
2695 demcr &= ~0xffff;
2696 demcr |= catch;
2698 /* write, but don't assume it stuck (why not??) */
2699 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
2700 if (retval != ERROR_OK)
2701 return retval;
2702 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2703 if (retval != ERROR_OK)
2704 return retval;
2706 /* FIXME be sure to clear DEMCR on clean server shutdown.
2707 * Otherwise the vector catch hardware could fire when there's
2708 * no debugger hooked up, causing much confusion...
2712 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2713 command_print(CMD, "%9s: %s", vec_ids[i].name,
2714 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2717 return ERROR_OK;
2720 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2722 struct target *target = get_current_target(CMD_CTX);
2723 struct cortex_m_common *cortex_m = target_to_cm(target);
2724 int retval;
2726 static const struct jim_nvp nvp_maskisr_modes[] = {
2727 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2728 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2729 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2730 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2731 { .name = NULL, .value = -1 },
2733 const struct jim_nvp *n;
2736 retval = cortex_m_verify_pointer(CMD, cortex_m);
2737 if (retval != ERROR_OK)
2738 return retval;
2740 if (target->state != TARGET_HALTED) {
2741 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2742 return ERROR_OK;
2745 if (CMD_ARGC > 0) {
2746 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2747 if (!n->name)
2748 return ERROR_COMMAND_SYNTAX_ERROR;
2749 cortex_m->isrmasking_mode = n->value;
2750 cortex_m_set_maskints_for_halt(target);
2753 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2754 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2756 return ERROR_OK;
2759 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2761 struct target *target = get_current_target(CMD_CTX);
2762 struct cortex_m_common *cortex_m = target_to_cm(target);
2763 int retval;
2764 char *reset_config;
2766 retval = cortex_m_verify_pointer(CMD, cortex_m);
2767 if (retval != ERROR_OK)
2768 return retval;
2770 if (CMD_ARGC > 0) {
2771 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2772 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2774 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2775 if (target_was_examined(target)
2776 && !cortex_m->vectreset_supported)
2777 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2778 else
2779 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2781 } else
2782 return ERROR_COMMAND_SYNTAX_ERROR;
2785 switch (cortex_m->soft_reset_config) {
2786 case CORTEX_M_RESET_SYSRESETREQ:
2787 reset_config = "sysresetreq";
2788 break;
2790 case CORTEX_M_RESET_VECTRESET:
2791 reset_config = "vectreset";
2792 break;
2794 default:
2795 reset_config = "unknown";
2796 break;
2799 command_print(CMD, "cortex_m reset_config %s", reset_config);
2801 return ERROR_OK;
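/* Example usage of these commands from the OpenOCD console
 * (illustrative values only):
 *
 * cortex_m maskisr auto
 * cortex_m vector_catch hard_err bus_err
 * cortex_m reset_config sysresetreq
 */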
2804 static const struct command_registration cortex_m_exec_command_handlers[] = {
2806 .name = "maskisr",
2807 .handler = handle_cortex_m_mask_interrupts_command,
2808 .mode = COMMAND_EXEC,
2809 .help = "mask cortex_m interrupts",
2810 .usage = "['auto'|'on'|'off'|'steponly']",
2813 .name = "vector_catch",
2814 .handler = handle_cortex_m_vector_catch_command,
2815 .mode = COMMAND_EXEC,
2816 .help = "configure hardware vectors to trigger debug entry",
2817 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2820 .name = "reset_config",
2821 .handler = handle_cortex_m_reset_config_command,
2822 .mode = COMMAND_ANY,
2823 .help = "configure software reset handling",
2824 .usage = "['sysresetreq'|'vectreset']",
2826 COMMAND_REGISTRATION_DONE
2828 static const struct command_registration cortex_m_command_handlers[] = {
2830 .chain = armv7m_command_handlers,
2833 .chain = armv7m_trace_command_handlers,
2835 /* START_DEPRECATED_TPIU */
2837 .chain = arm_tpiu_deprecated_command_handlers,
2839 /* END_DEPRECATED_TPIU */
2841 .name = "cortex_m",
2842 .mode = COMMAND_EXEC,
2843 .help = "Cortex-M command group",
2844 .usage = "",
2845 .chain = cortex_m_exec_command_handlers,
2848 .chain = rtt_target_command_handlers,
2850 COMMAND_REGISTRATION_DONE
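/* Dispatch table tying the generic target layer to the Cortex-M
 * implementation above; the shared armv7m_* helpers cover register
 * access, checksums and algorithm execution.
 */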
2853 struct target_type cortexm_target = {
2854 .name = "cortex_m",
2856 .poll = cortex_m_poll,
2857 .arch_state = armv7m_arch_state,
2859 .target_request_data = cortex_m_target_request_data,
2861 .halt = cortex_m_halt,
2862 .resume = cortex_m_resume,
2863 .step = cortex_m_step,
2865 .assert_reset = cortex_m_assert_reset,
2866 .deassert_reset = cortex_m_deassert_reset,
2867 .soft_reset_halt = cortex_m_soft_reset_halt,
2869 .get_gdb_arch = arm_get_gdb_arch,
2870 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2872 .read_memory = cortex_m_read_memory,
2873 .write_memory = cortex_m_write_memory,
2874 .checksum_memory = armv7m_checksum_memory,
2875 .blank_check_memory = armv7m_blank_check_memory,
2877 .run_algorithm = armv7m_run_algorithm,
2878 .start_algorithm = armv7m_start_algorithm,
2879 .wait_algorithm = armv7m_wait_algorithm,
2881 .add_breakpoint = cortex_m_add_breakpoint,
2882 .remove_breakpoint = cortex_m_remove_breakpoint,
2883 .add_watchpoint = cortex_m_add_watchpoint,
2884 .remove_watchpoint = cortex_m_remove_watchpoint,
2885 .hit_watchpoint = cortex_m_hit_watchpoint,
2887 .commands = cortex_m_command_handlers,
2888 .target_create = cortex_m_target_create,
2889 .target_jim_configure = adiv5_jim_configure,
2890 .init_target = cortex_m_init_target,
2891 .examine = cortex_m_examine,
2892 .deinit_target = cortex_m_deinit_target,
2894 .profiling = cortex_m_profiling,