armv7m: use generic arm read/write_core_reg
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
56 /**
57 * Returns the type of a break point required by address location
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
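/* Editorial note, not part of the original source: in the ARMv7-M memory map
 * the Code region ends just below 0x20000000, where SRAM begins, so the macro
 * above selects a hardware (FPB) breakpoint for flash/code addresses and a
 * software BKPT patch for RAM addresses.  Illustrative values:
 *
 *   BKPT_TYPE_BY_ADDR(0x08000400)  -> BKPT_HARD   (code region, FPB slot)
 *   BKPT_TYPE_BY_ADDR(0x20000400)  -> BKPT_SOFT   (SRAM, patched with BKPT)
 */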
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 uint32_t num, uint32_t value);
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
69 int retval;
70 uint32_t dcrdr;
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
99 /* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
104 return retval;
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
110 int retval;
111 uint32_t dcrdr;
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
140 /* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
145 return retval;
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
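/* Editorial note, not part of the original source: the helper above always
 * ORs in DBGKEY | C_DEBUGEN and strips the read-only status bits, so callers
 * only name the control bits to set and to clear.  Typical call patterns
 * used later in this file:
 *
 *   cortex_m3_write_debug_halt_mask(target, C_HALT, 0);        halt the core
 *   cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);   single-step
 *   cortex_m3_write_debug_halt_mask(target, 0, C_HALT);        let it run
 */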
162 static int cortex_m3_clear_halt(struct target *target)
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
182 return ERROR_OK;
185 static int cortex_m3_single_step_core(struct target *target)
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
195 /* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
215 return ERROR_OK;
218 static int cortex_m3_endreset_event(struct target *target)
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 * debug state (including FPB, DWT, etc) across reset...
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
273 cortex_m3->fpb_enabled = 1;
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
301 register_cache_invalidate(armv7m->arm.core_cache);
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
306 return retval;
309 static int cortex_m3_examine_debug_reason(struct target *target)
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
330 return ERROR_OK;
333 static int cortex_m3_examine_exception_reason(struct target *target)
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
401 static int cortex_m3_debug_entry(struct target *target)
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
412 LOG_DEBUG(" ");
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
423 /* Examine target state and mode
424 * First load registers accessible through the core debug port */
425 int num_regs = arm->core_cache->num_regs;
427 for (i = 0; i < num_regs; i++) {
428 r = &armv7m->arm.core_cache->reg_list[i];
429 if (!r->valid)
430 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
433 r = arm->core_cache->reg_list + ARMV7M_xPSR;
434 xPSR = buf_get_u32(r->value, 0, 32);
436 #ifdef ARMV7_GDB_HACKS
437 /* FIXME this breaks on scan chains with more than one Cortex-M3.
438 * Instead, each CM3 should have its own dummy value...
440 /* copy real xpsr reg for gdb, setting thumb bit */
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
442 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
443 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
444 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
445 #endif
447 /* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
448 if (xPSR & 0xf00) {
449 r->dirty = r->valid;
450 cortex_m3_store_core_reg_u32(target, 16, xPSR & ~0xff);
453 /* Are we in an exception handler */
454 if (xPSR & 0x1FF) {
455 armv7m->exception_number = (xPSR & 0x1FF);
457 arm->core_mode = ARM_MODE_HANDLER;
458 arm->map = armv7m_msp_reg_map;
459 } else {
460 unsigned control = buf_get_u32(arm->core_cache
461 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
463 /* is this thread privileged? */
464 arm->core_mode = control & 1
465 ? ARM_MODE_USER_THREAD
466 : ARM_MODE_THREAD;
468 /* which stack is it using? */
469 if (control & 2)
470 arm->map = armv7m_psp_reg_map;
471 else
472 arm->map = armv7m_msp_reg_map;
474 armv7m->exception_number = 0;
477 if (armv7m->exception_number)
478 cortex_m3_examine_exception_reason(target);
480 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
481 arm_mode_name(arm->core_mode),
482 *(uint32_t *)(arm->pc->value),
483 target_state_name(target));
485 if (armv7m->post_debug_entry) {
486 retval = armv7m->post_debug_entry(target);
487 if (retval != ERROR_OK)
488 return retval;
491 return ERROR_OK;
494 static int cortex_m3_poll(struct target *target)
496 int detected_failure = ERROR_OK;
497 int retval = ERROR_OK;
498 enum target_state prev_target_state = target->state;
499 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
500 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
502 /* Read from Debug Halting Control and Status Register */
503 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
504 if (retval != ERROR_OK) {
505 target->state = TARGET_UNKNOWN;
506 return retval;
509 /* Recover from lockup. See ARMv7-M architecture spec,
510 * section B1.5.15 "Unrecoverable exception cases".
512 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
513 LOG_ERROR("%s -- clearing lockup after double fault",
514 target_name(target));
515 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
516 target->debug_reason = DBG_REASON_DBGRQ;
518 /* We have to execute the rest (the "finally" equivalent), but
519 * still report this failure when we are done.
521 detected_failure = ERROR_FAIL;
523 /* refresh status bits */
524 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
525 if (retval != ERROR_OK)
526 return retval;
529 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
530 /* check if still in reset */
531 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
532 if (retval != ERROR_OK)
533 return retval;
535 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
536 target->state = TARGET_RESET;
537 return ERROR_OK;
541 if (target->state == TARGET_RESET) {
542 /* Cannot switch context while running so endreset is
543 * called with target->state == TARGET_RESET
545 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
546 cortex_m3->dcb_dhcsr);
547 cortex_m3_endreset_event(target);
548 target->state = TARGET_RUNNING;
549 prev_target_state = TARGET_RUNNING;
552 if (cortex_m3->dcb_dhcsr & S_HALT) {
553 target->state = TARGET_HALTED;
555 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
556 retval = cortex_m3_debug_entry(target);
557 if (retval != ERROR_OK)
558 return retval;
560 if (arm_semihosting(target, &retval) != 0)
561 return retval;
563 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
565 if (prev_target_state == TARGET_DEBUG_RUNNING) {
566 LOG_DEBUG(" ");
567 retval = cortex_m3_debug_entry(target);
568 if (retval != ERROR_OK)
569 return retval;
571 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
575 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
576 * How best to model low power modes?
579 if (target->state == TARGET_UNKNOWN) {
580 /* check if processor is retiring instructions */
581 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
582 target->state = TARGET_RUNNING;
583 retval = ERROR_OK;
587 /* Did we detect a failure condition that we cleared? */
588 if (detected_failure != ERROR_OK)
589 retval = detected_failure;
590 return retval;
593 static int cortex_m3_halt(struct target *target)
595 LOG_DEBUG("target->state: %s",
596 target_state_name(target));
598 if (target->state == TARGET_HALTED) {
599 LOG_DEBUG("target was already halted");
600 return ERROR_OK;
603 if (target->state == TARGET_UNKNOWN)
604 LOG_WARNING("target was in unknown state when halt was requested");
606 if (target->state == TARGET_RESET) {
607 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
608 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
609 return ERROR_TARGET_FAILURE;
610 } else {
611 /* we came here in a reset_halt or reset_init sequence
612 * debug entry was already prepared in cortex_m3_assert_reset()
614 target->debug_reason = DBG_REASON_DBGRQ;
616 return ERROR_OK;
620 /* Write to Debug Halting Control and Status Register */
621 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
623 target->debug_reason = DBG_REASON_DBGRQ;
625 return ERROR_OK;
628 static int cortex_m3_soft_reset_halt(struct target *target)
630 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
631 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
632 uint32_t dcb_dhcsr = 0;
633 int retval, timeout = 0;
635 /* Enter debug state on reset; restore DEMCR in endreset_event() */
636 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
637 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
638 if (retval != ERROR_OK)
639 return retval;
641 /* Request a core-only reset */
642 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
643 AIRCR_VECTKEY | AIRCR_VECTRESET);
644 if (retval != ERROR_OK)
645 return retval;
646 target->state = TARGET_RESET;
648 /* registers are now invalid */
649 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
651 while (timeout < 100) {
652 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
653 if (retval == ERROR_OK) {
654 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
655 &cortex_m3->nvic_dfsr);
656 if (retval != ERROR_OK)
657 return retval;
658 if ((dcb_dhcsr & S_HALT)
659 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
660 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
661 "DFSR 0x%08x",
662 (unsigned) dcb_dhcsr,
663 (unsigned) cortex_m3->nvic_dfsr);
664 cortex_m3_poll(target);
665 /* FIXME restore user's vector catch config */
666 return ERROR_OK;
667 } else
668 LOG_DEBUG("waiting for system reset-halt, "
669 "DHCSR 0x%08x, %d ms",
670 (unsigned) dcb_dhcsr, timeout);
672 timeout++;
673 alive_sleep(1);
676 return ERROR_OK;
679 void cortex_m3_enable_breakpoints(struct target *target)
681 struct breakpoint *breakpoint = target->breakpoints;
683 /* set any pending breakpoints */
684 while (breakpoint) {
685 if (!breakpoint->set)
686 cortex_m3_set_breakpoint(target, breakpoint);
687 breakpoint = breakpoint->next;
691 static int cortex_m3_resume(struct target *target, int current,
692 uint32_t address, int handle_breakpoints, int debug_execution)
694 struct armv7m_common *armv7m = target_to_armv7m(target);
695 struct breakpoint *breakpoint = NULL;
696 uint32_t resume_pc;
697 struct reg *r;
699 if (target->state != TARGET_HALTED) {
700 LOG_WARNING("target not halted");
701 return ERROR_TARGET_NOT_HALTED;
704 if (!debug_execution) {
705 target_free_all_working_areas(target);
706 cortex_m3_enable_breakpoints(target);
707 cortex_m3_enable_watchpoints(target);
710 if (debug_execution) {
711 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
713 /* Disable interrupts */
714 /* We disable interrupts in the PRIMASK register instead of
715 * masking with C_MASKINTS. This is probably the same issue
716 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
717 * in parallel with disabled interrupts can cause local faults
718 * to not be taken.
720 * REVISIT this clearly breaks non-debug execution, since the
721 * PRIMASK register state isn't saved/restored... workaround
722 * by never resuming app code after debug execution.
724 buf_set_u32(r->value, 0, 1, 1);
725 r->dirty = true;
726 r->valid = true;
728 /* Make sure we are in Thumb mode */
729 r = armv7m->arm.core_cache->reg_list + ARMV7M_xPSR;
730 buf_set_u32(r->value, 24, 1, 1);
731 r->dirty = true;
732 r->valid = true;
735 /* current = 1: continue on current pc, otherwise continue at <address> */
736 r = armv7m->arm.pc;
737 if (!current) {
738 buf_set_u32(r->value, 0, 32, address);
739 r->dirty = true;
740 r->valid = true;
743 /* if we halted last time due to a bkpt instruction
744 * then we have to manually step over it, otherwise
745 * the core will break again */
747 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
748 && !debug_execution)
749 armv7m_maybe_skip_bkpt_inst(target, NULL);
751 resume_pc = buf_get_u32(r->value, 0, 32);
753 armv7m_restore_context(target);
755 /* the front-end may request us not to handle breakpoints */
756 if (handle_breakpoints) {
757 /* Single step past breakpoint at current address */
758 breakpoint = breakpoint_find(target, resume_pc);
759 if (breakpoint) {
760 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
761 breakpoint->address,
762 breakpoint->unique_id);
763 cortex_m3_unset_breakpoint(target, breakpoint);
764 cortex_m3_single_step_core(target);
765 cortex_m3_set_breakpoint(target, breakpoint);
769 /* Restart core */
770 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
772 target->debug_reason = DBG_REASON_NOTHALTED;
774 /* registers are now invalid */
775 register_cache_invalidate(armv7m->arm.core_cache);
777 if (!debug_execution) {
778 target->state = TARGET_RUNNING;
779 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
780 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
781 } else {
782 target->state = TARGET_DEBUG_RUNNING;
783 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
784 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
787 return ERROR_OK;
790 /* int irqstepcount = 0; */
791 static int cortex_m3_step(struct target *target, int current,
792 uint32_t address, int handle_breakpoints)
794 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
795 struct armv7m_common *armv7m = &cortex_m3->armv7m;
796 struct adiv5_dap *swjdp = armv7m->arm.dap;
797 struct breakpoint *breakpoint = NULL;
798 struct reg *pc = armv7m->arm.pc;
799 bool bkpt_inst_found = false;
800 int retval;
801 bool isr_timed_out = false;
803 if (target->state != TARGET_HALTED) {
804 LOG_WARNING("target not halted");
805 return ERROR_TARGET_NOT_HALTED;
808 /* current = 1: continue on current pc, otherwise continue at <address> */
809 if (!current)
810 buf_set_u32(pc->value, 0, 32, address);
812 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
814 /* the front-end may request us not to handle breakpoints */
815 if (handle_breakpoints) {
816 breakpoint = breakpoint_find(target, pc_value);
817 if (breakpoint)
818 cortex_m3_unset_breakpoint(target, breakpoint);
821 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
823 target->debug_reason = DBG_REASON_SINGLESTEP;
825 armv7m_restore_context(target);
827 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
829 /* if no bkpt instruction is found at pc then we can perform
830 * a normal step, otherwise we have to manually step over the bkpt
831 * instruction - as such simulate a step */
832 if (bkpt_inst_found == false) {
833 /* Automatic ISR masking mode off: Just step over the next instruction */
834 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
835 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
836 else {
837 /* Process interrupts during stepping in a way that they don't
838 * interfere with debugging.
840 * Principle:
842 * Set a temporary break point at the current pc and let the core run
843 * with interrupts enabled. Pending interrupts get served and we run
844 * into the breakpoint again afterwards. Then we step over the next
845 * instruction with interrupts disabled.
847 * If the pending interrupts don't complete within time, we leave the
848 * core running. This may happen if the interrupts trigger faster
849 * than the core can process them or the handler doesn't return.
851 * If no more breakpoints are available we simply do a step with
852 * interrupts enabled.
856 /* 2012-09-29 ph
858 * If a break point is already set on the lower half word then a break point on
859 * the upper half word will not break again when the core is restarted. So we
860 * just step over the instruction with interrupts disabled.
862 * The documentation has no information about this, it was found by observation
863 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
864 * suffer from this problem.
866 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
867 * address has it always cleared. The former is done to indicate thumb mode
868 * to gdb.
871 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
872 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
873 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
874 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
875 /* Re-enable interrupts */
876 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
878 else {
880 /* Set a temporary break point */
881 if (breakpoint)
882 retval = cortex_m3_set_breakpoint(target, breakpoint);
883 else
884 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
885 bool tmp_bp_set = (retval == ERROR_OK);
887 /* No more breakpoints left, just do a step */
888 if (!tmp_bp_set)
889 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
890 else {
891 /* Start the core */
892 LOG_DEBUG("Starting core to serve pending interrupts");
893 int64_t t_start = timeval_ms();
894 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
896 /* Wait for pending handlers to complete or timeout */
897 do {
898 retval = mem_ap_read_atomic_u32(swjdp,
899 DCB_DHCSR,
900 &cortex_m3->dcb_dhcsr);
901 if (retval != ERROR_OK) {
902 target->state = TARGET_UNKNOWN;
903 return retval;
905 isr_timed_out = ((timeval_ms() - t_start) > 500);
906 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
908 /* only remove breakpoint if we created it */
909 if (breakpoint)
910 cortex_m3_unset_breakpoint(target, breakpoint);
911 else {
912 /* Remove the temporary breakpoint */
913 breakpoint_remove(target, pc_value);
916 if (isr_timed_out) {
917 LOG_DEBUG("Interrupt handlers didn't complete within time, "
918 "leaving target running");
919 } else {
920 /* Step over next instruction with interrupts disabled */
921 cortex_m3_write_debug_halt_mask(target,
922 C_HALT | C_MASKINTS,
924 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
925 /* Re-enable interrupts */
926 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
933 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
934 if (retval != ERROR_OK)
935 return retval;
937 /* registers are now invalid */
938 register_cache_invalidate(armv7m->arm.core_cache);
940 if (breakpoint)
941 cortex_m3_set_breakpoint(target, breakpoint);
943 if (isr_timed_out) {
944 /* Leave the core running. The user has to stop execution manually. */
945 target->debug_reason = DBG_REASON_NOTHALTED;
946 target->state = TARGET_RUNNING;
947 return ERROR_OK;
950 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
951 " nvic_icsr = 0x%" PRIx32,
952 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
954 retval = cortex_m3_debug_entry(target);
955 if (retval != ERROR_OK)
956 return retval;
957 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
959 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
960 " nvic_icsr = 0x%" PRIx32,
961 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
963 return ERROR_OK;
966 static int cortex_m3_assert_reset(struct target *target)
968 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
969 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
970 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
972 LOG_DEBUG("target->state: %s",
973 target_state_name(target));
975 enum reset_types jtag_reset_config = jtag_get_reset_config();
977 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
978 /* allow scripts to override the reset event */
980 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
981 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
982 target->state = TARGET_RESET;
984 return ERROR_OK;
987 /* some cores support connecting while srst is asserted
988 * use that mode if it has been configured */
990 bool srst_asserted = false;
992 if (jtag_reset_config & RESET_SRST_NO_GATING) {
993 adapter_assert_reset();
994 srst_asserted = true;
997 /* Enable debug requests */
998 int retval;
999 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
1000 if (retval != ERROR_OK)
1001 return retval;
1002 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
1003 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1004 if (retval != ERROR_OK)
1005 return retval;
1008 /* If the processor is sleeping in a WFI or WFE instruction, the
1009 * C_HALT bit must be asserted to regain control */
1010 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1011 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1012 if (retval != ERROR_OK)
1013 return retval;
1016 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1017 if (retval != ERROR_OK)
1018 return retval;
1020 if (!target->reset_halt) {
1021 /* Set/Clear C_MASKINTS in a separate operation */
1022 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1023 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1024 DBGKEY | C_DEBUGEN | C_HALT);
1025 if (retval != ERROR_OK)
1026 return retval;
1029 /* clear any debug flags before resuming */
1030 cortex_m3_clear_halt(target);
1032 /* clear C_HALT in dhcsr reg */
1033 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1034 } else {
1035 /* Halt in debug on reset; endreset_event() restores DEMCR.
1037 * REVISIT catching BUSERR presumably helps to defend against
1038 * bad vector table entries. Should this include MMERR or
1039 * other flags too?
1041 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1042 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1043 if (retval != ERROR_OK)
1044 return retval;
1047 if (jtag_reset_config & RESET_HAS_SRST) {
1048 /* default to asserting srst */
1049 if (!srst_asserted)
1050 adapter_assert_reset();
1051 } else {
1052 /* Use a standard Cortex-M3 software reset mechanism.
1053 * We default to using VECTRESET as it is supported on all current cores.
1054 * This has the disadvantage of not resetting the peripherals, so a
1055 * reset-init event handler is needed to perform any peripheral resets.
1057 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1058 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1059 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1060 if (retval != ERROR_OK)
1061 return retval;
1063 LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1064 ? "SYSRESETREQ" : "VECTRESET");
1066 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1067 LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
1068 "handler to reset any peripherals or configure hardware srst support.");
1072 /* I do not know why this is necessary, but it
1073 * fixes strange effects (step/resume cause NMI
1074 * after reset) on LM3S6918 -- Michael Schwingen
1076 uint32_t tmp;
1077 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1078 if (retval != ERROR_OK)
1079 return retval;
1083 target->state = TARGET_RESET;
1084 jtag_add_sleep(50000);
1086 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
1088 if (target->reset_halt) {
1089 retval = target_halt(target);
1090 if (retval != ERROR_OK)
1091 return retval;
1094 return ERROR_OK;
1097 static int cortex_m3_deassert_reset(struct target *target)
1099 LOG_DEBUG("target->state: %s",
1100 target_state_name(target));
1102 /* deassert reset lines */
1103 adapter_deassert_reset();
1105 return ERROR_OK;
1108 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1110 int retval;
1111 int fp_num = 0;
1112 uint32_t hilo;
1113 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1114 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1116 if (breakpoint->set) {
1117 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1118 return ERROR_OK;
1121 if (cortex_m3->auto_bp_type)
1122 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1124 if (breakpoint->type == BKPT_HARD) {
1125 while ((fp_num < cortex_m3->fp_num_code) && comparator_list[fp_num].used)
1126 fp_num++;
1127 if (fp_num >= cortex_m3->fp_num_code) {
1128 LOG_ERROR("Can not find free FPB Comparator!");
1129 return ERROR_FAIL;
1131 breakpoint->set = fp_num + 1;
1132 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1133 comparator_list[fp_num].used = 1;
1134 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1135 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1136 comparator_list[fp_num].fpcr_value);
1137 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1138 fp_num,
1139 comparator_list[fp_num].fpcr_value);
1140 if (!cortex_m3->fpb_enabled) {
1141 LOG_DEBUG("FPB wasn't enabled, do it now");
1142 target_write_u32(target, FP_CTRL, 3);
1144 } else if (breakpoint->type == BKPT_SOFT) {
1145 uint8_t code[4];
1147 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1148 * semihosting; don't use that. Otherwise the BKPT
1149 * parameter is arbitrary.
1151 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1152 retval = target_read_memory(target,
1153 breakpoint->address & 0xFFFFFFFE,
1154 breakpoint->length, 1,
1155 breakpoint->orig_instr);
1156 if (retval != ERROR_OK)
1157 return retval;
1158 retval = target_write_memory(target,
1159 breakpoint->address & 0xFFFFFFFE,
1160 breakpoint->length, 1,
1161 code);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 breakpoint->set = true;
1167 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1168 breakpoint->unique_id,
1169 (int)(breakpoint->type),
1170 breakpoint->address,
1171 breakpoint->length,
1172 breakpoint->set);
1174 return ERROR_OK;
1177 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1179 int retval;
1180 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1181 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1183 if (!breakpoint->set) {
1184 LOG_WARNING("breakpoint not set");
1185 return ERROR_OK;
1188 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1189 breakpoint->unique_id,
1190 (int)(breakpoint->type),
1191 breakpoint->address,
1192 breakpoint->length,
1193 breakpoint->set);
1195 if (breakpoint->type == BKPT_HARD) {
1196 int fp_num = breakpoint->set - 1;
1197 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1198 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1199 return ERROR_OK;
1201 comparator_list[fp_num].used = 0;
1202 comparator_list[fp_num].fpcr_value = 0;
1203 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1204 comparator_list[fp_num].fpcr_value);
1205 } else {
1206 /* restore original instruction (kept in target endianness) */
1207 if (breakpoint->length == 4) {
1208 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1209 breakpoint->orig_instr);
1210 if (retval != ERROR_OK)
1211 return retval;
1212 } else {
1213 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1214 breakpoint->orig_instr);
1215 if (retval != ERROR_OK)
1216 return retval;
1219 breakpoint->set = false;
1221 return ERROR_OK;
1224 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1226 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1228 if (cortex_m3->auto_bp_type) {
1229 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1230 #ifdef ARMV7_GDB_HACKS
1231 if (breakpoint->length != 2) {
1232 /* XXX Hack: Replace all breakpoints with length != 2 with
1233 * a hardware breakpoint. */
1234 breakpoint->type = BKPT_HARD;
1235 breakpoint->length = 2;
1237 #endif
1240 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1241 if (breakpoint->type == BKPT_HARD) {
1242 LOG_INFO("flash patch comparator requested outside code memory region");
1243 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 if (breakpoint->type == BKPT_SOFT) {
1247 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1248 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1252 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1253 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1254 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1257 if ((breakpoint->length != 2)) {
1258 LOG_INFO("only breakpoints of two bytes length supported");
1259 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1262 if (breakpoint->type == BKPT_HARD)
1263 cortex_m3->fp_code_available--;
1265 return cortex_m3_set_breakpoint(target, breakpoint);
1268 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1270 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1272 /* REVISIT why check? FBP can be updated with core running ... */
1273 if (target->state != TARGET_HALTED) {
1274 LOG_WARNING("target not halted");
1275 return ERROR_TARGET_NOT_HALTED;
1278 if (cortex_m3->auto_bp_type)
1279 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1281 if (breakpoint->set)
1282 cortex_m3_unset_breakpoint(target, breakpoint);
1284 if (breakpoint->type == BKPT_HARD)
1285 cortex_m3->fp_code_available++;
1287 return ERROR_OK;
1290 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1292 int dwt_num = 0;
1293 uint32_t mask, temp;
1294 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1296 /* watchpoint params were validated earlier */
1297 mask = 0;
1298 temp = watchpoint->length;
1299 while (temp) {
1300 temp >>= 1;
1301 mask++;
1303 mask--;
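/* Editorial note, not part of the original source: the loop above computes
 * log2(length) for the power-of-two lengths that cortex_m3_add_watchpoint()
 * accepts, e.g. length 1 -> mask 0, 2 -> 1, 4 -> 2; the DWT MASK register
 * then ignores that many low address bits when comparing.
 */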
1305 /* REVISIT Don't fully trust these "not used" records ... users
1306 * may set up breakpoints by hand, e.g. dual-address data value
1307 * watchpoint using comparator #1; comparator #0 matching cycle
1308 * count; send data trace info through ITM and TPIU; etc
1310 struct cortex_m3_dwt_comparator *comparator;
1312 for (comparator = cortex_m3->dwt_comparator_list;
1313 dwt_num < cortex_m3->dwt_num_comp && comparator->used;
1314 comparator++, dwt_num++)
1315 continue;
1316 if (dwt_num >= cortex_m3->dwt_num_comp) {
1317 LOG_ERROR("Can not find free DWT Comparator");
1318 return ERROR_FAIL;
1320 comparator->used = 1;
1321 watchpoint->set = dwt_num + 1;
1323 comparator->comp = watchpoint->address;
1324 target_write_u32(target, comparator->dwt_comparator_address + 0,
1325 comparator->comp);
1327 comparator->mask = mask;
1328 target_write_u32(target, comparator->dwt_comparator_address + 4,
1329 comparator->mask);
1331 switch (watchpoint->rw) {
1332 case WPT_READ:
1333 comparator->function = 5;
1334 break;
1335 case WPT_WRITE:
1336 comparator->function = 6;
1337 break;
1338 case WPT_ACCESS:
1339 comparator->function = 7;
1340 break;
1342 target_write_u32(target, comparator->dwt_comparator_address + 8,
1343 comparator->function);
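/* Editorial note, an assumption based on the ARMv7-M DWT documentation
 * rather than on this file: the function values 5, 6 and 7 written above
 * select "watchpoint debug event on read", "... on write" and "... on read
 * or write" respectively in the DWT FUNCTION register.
 */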
1345 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1346 watchpoint->unique_id, dwt_num,
1347 (unsigned) comparator->comp,
1348 (unsigned) comparator->mask,
1349 (unsigned) comparator->function);
1350 return ERROR_OK;
1353 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1355 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1356 struct cortex_m3_dwt_comparator *comparator;
1357 int dwt_num;
1359 if (!watchpoint->set) {
1360 LOG_WARNING("watchpoint (wpid: %d) not set",
1361 watchpoint->unique_id);
1362 return ERROR_OK;
1365 dwt_num = watchpoint->set - 1;
1367 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1368 watchpoint->unique_id, dwt_num,
1369 (unsigned) watchpoint->address);
1371 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1372 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1373 return ERROR_OK;
1376 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1377 comparator->used = 0;
1378 comparator->function = 0;
1379 target_write_u32(target, comparator->dwt_comparator_address + 8,
1380 comparator->function);
1382 watchpoint->set = false;
1384 return ERROR_OK;
1387 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1389 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1391 if (cortex_m3->dwt_comp_available < 1) {
1392 LOG_DEBUG("no comparators?");
1393 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1396 /* hardware doesn't support data value masking */
1397 if (watchpoint->mask != ~(uint32_t)0) {
1398 LOG_DEBUG("watchpoint value masks not supported");
1399 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1402 /* hardware allows address masks of up to 32K */
1403 unsigned mask;
1405 for (mask = 0; mask < 16; mask++) {
1406 if ((1u << mask) == watchpoint->length)
1407 break;
1409 if (mask == 16) {
1410 LOG_DEBUG("unsupported watchpoint length");
1411 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1413 if (watchpoint->address & ((1 << mask) - 1)) {
1414 LOG_DEBUG("watchpoint address is unaligned");
1415 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1418 /* Caller doesn't seem to be able to describe watching for data
1419 * values of zero; that flags "no value".
1421 * REVISIT This DWT may well be able to watch for specific data
1422 * values. Requires comparator #1 to set DATAVMATCH and match
1423 * the data, and another comparator (DATAVADDR0) matching addr.
1425 if (watchpoint->value) {
1426 LOG_DEBUG("data value watchpoint not YET supported");
1427 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1430 cortex_m3->dwt_comp_available--;
1431 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1433 return ERROR_OK;
1436 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1438 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1440 /* REVISIT why check? DWT can be updated with core running ... */
1441 if (target->state != TARGET_HALTED) {
1442 LOG_WARNING("target not halted");
1443 return ERROR_TARGET_NOT_HALTED;
1446 if (watchpoint->set)
1447 cortex_m3_unset_watchpoint(target, watchpoint);
1449 cortex_m3->dwt_comp_available++;
1450 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1452 return ERROR_OK;
1455 void cortex_m3_enable_watchpoints(struct target *target)
1457 struct watchpoint *watchpoint = target->watchpoints;
1459 /* set any pending watchpoints */
1460 while (watchpoint) {
1461 if (!watchpoint->set)
1462 cortex_m3_set_watchpoint(target, watchpoint);
1463 watchpoint = watchpoint->next;
1467 static int cortex_m3_load_core_reg_u32(struct target *target,
1468 uint32_t num, uint32_t *value)
1470 int retval;
1471 struct armv7m_common *armv7m = target_to_armv7m(target);
1472 struct adiv5_dap *swjdp = armv7m->arm.dap;
1474 /* NOTE: we "know" here that the register identifiers used
1475 * in the v7m header match the Cortex-M3 Debug Core Register
1476 * Selector values for R0..R15, xPSR, MSP, and PSP.
1478 switch (num) {
1479 case 0 ... 18:
1480 /* read a normal core register */
1481 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1483 if (retval != ERROR_OK) {
1484 LOG_ERROR("JTAG failure %i", retval);
1485 return ERROR_JTAG_DEVICE_ERROR;
1487 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1488 break;
1490 case ARMV7M_PRIMASK:
1491 case ARMV7M_BASEPRI:
1492 case ARMV7M_FAULTMASK:
1493 case ARMV7M_CONTROL:
1494 /* Cortex-M3 packages these four registers as bitfields
1495 * in one Debug Core register. So say r0 and r2 docs;
1496 * it was removed from r1 docs, but still works.
1498 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1500 switch (num) {
1501 case ARMV7M_PRIMASK:
1502 *value = buf_get_u32((uint8_t *)value, 0, 1);
1503 break;
1505 case ARMV7M_BASEPRI:
1506 *value = buf_get_u32((uint8_t *)value, 8, 8);
1507 break;
1509 case ARMV7M_FAULTMASK:
1510 *value = buf_get_u32((uint8_t *)value, 16, 1);
1511 break;
1513 case ARMV7M_CONTROL:
1514 *value = buf_get_u32((uint8_t *)value, 24, 2);
1515 break;
1518 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1519 break;
1521 default:
1522 return ERROR_COMMAND_SYNTAX_ERROR;
1525 return ERROR_OK;
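/* Editorial note, not part of the original source: for reference, the layout
 * of Debug Core register 20 as used by the load helper above and the store
 * helper below:
 *
 *   bits [0]      PRIMASK
 *   bits [15:8]   BASEPRI
 *   bits [16]     FAULTMASK
 *   bits [25:24]  CONTROL
 */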
1528 static int cortex_m3_store_core_reg_u32(struct target *target,
1529 uint32_t num, uint32_t value)
1531 int retval;
1532 uint32_t reg;
1533 struct armv7m_common *armv7m = target_to_armv7m(target);
1534 struct adiv5_dap *swjdp = armv7m->arm.dap;
1536 #ifdef ARMV7_GDB_HACKS
1537 /* If the LR register is being modified, make sure it will put us
1538 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1539 * hack to deal with the fact that gdb will sometimes "forge"
1540 * return addresses, and doesn't set the LSB correctly (i.e., when
1541 * printing expressions containing function calls, it sets LR = 0.)
1542 * Valid exception return codes have bit 0 set too.
1544 if (num == ARMV7M_R14)
1545 value |= 0x01;
1546 #endif
1548 /* NOTE: we "know" here that the register identifiers used
1549 * in the v7m header match the Cortex-M3 Debug Core Register
1550 * Selector values for R0..R15, xPSR, MSP, and PSP.
1552 switch (num) {
1553 case 0 ... 18:
1554 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1555 if (retval != ERROR_OK) {
1556 struct reg *r;
1558 LOG_ERROR("JTAG failure");
1559 r = armv7m->arm.core_cache->reg_list + num;
1560 r->dirty = r->valid;
1561 return ERROR_JTAG_DEVICE_ERROR;
1563 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1564 break;
1566 case ARMV7M_PRIMASK:
1567 case ARMV7M_BASEPRI:
1568 case ARMV7M_FAULTMASK:
1569 case ARMV7M_CONTROL:
1570 /* Cortex-M3 packages these four registers as bitfields
1571 * in one Debug Core register. So say r0 and r2 docs;
1572 * it was removed from r1 docs, but still works.
1574 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1576 switch (num) {
1577 case ARMV7M_PRIMASK:
1578 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1579 break;
1581 case ARMV7M_BASEPRI:
1582 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1583 break;
1585 case ARMV7M_FAULTMASK:
1586 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1587 break;
1589 case ARMV7M_CONTROL:
1590 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1591 break;
1594 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1596 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1597 break;
1599 default:
1600 return ERROR_COMMAND_SYNTAX_ERROR;
1603 return ERROR_OK;
1606 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1607 uint32_t size, uint32_t count, uint8_t *buffer)
1609 struct armv7m_common *armv7m = target_to_armv7m(target);
1610 struct adiv5_dap *swjdp = armv7m->arm.dap;
1611 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1613 if (armv7m->arm.is_armv6m) {
1614 /* armv6m does not handle unaligned memory access */
1615 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1616 return ERROR_TARGET_UNALIGNED_ACCESS;
1619 /* cortex_m3 handles unaligned memory access */
1620 if (count && buffer) {
1621 switch (size) {
1622 case 4:
1623 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1624 break;
1625 case 2:
1626 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1627 break;
1628 case 1:
1629 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1630 break;
1634 return retval;
1637 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1638 uint32_t size, uint32_t count, const uint8_t *buffer)
1640 struct armv7m_common *armv7m = target_to_armv7m(target);
1641 struct adiv5_dap *swjdp = armv7m->arm.dap;
1642 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1644 if (armv7m->arm.is_armv6m) {
1645 /* armv6m does not handle unaligned memory access */
1646 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1647 return ERROR_TARGET_UNALIGNED_ACCESS;
1650 if (count && buffer) {
1651 switch (size) {
1652 case 4:
1653 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1654 break;
1655 case 2:
1656 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1657 break;
1658 case 1:
1659 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1660 break;
1664 return retval;
1667 static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
1668 uint32_t count, const uint8_t *buffer)
1670 return cortex_m3_write_memory(target, address, 4, count, buffer);
1673 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1674 struct target *target)
1676 armv7m_build_reg_cache(target);
1677 return ERROR_OK;
1680 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1681 * on r/w if the core is not running, and clear on resume or reset ... or
1682 * at least, in a post_restore_context() method.
1685 struct dwt_reg_state {
1686 struct target *target;
1687 uint32_t addr;
1688 uint32_t value; /* scratch/cache */
1691 static int cortex_m3_dwt_get_reg(struct reg *reg)
1693 struct dwt_reg_state *state = reg->arch_info;
1695 return target_read_u32(state->target, state->addr, &state->value);
1698 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1700 struct dwt_reg_state *state = reg->arch_info;
1702 return target_write_u32(state->target, state->addr,
1703 buf_get_u32(buf, 0, reg->size));
1706 struct dwt_reg {
1707 uint32_t addr;
1708 char *name;
1709 unsigned size;
1712 static struct dwt_reg dwt_base_regs[] = {
1713 { DWT_CTRL, "dwt_ctrl", 32, },
1714 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1715 * increments while the core is asleep.
1717 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1718 /* plus some 8 bit counters, useful for profiling with TPIU */
1721 static struct dwt_reg dwt_comp[] = {
1722 #define DWT_COMPARATOR(i) \
1723 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1724 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1725 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1726 DWT_COMPARATOR(0),
1727 DWT_COMPARATOR(1),
1728 DWT_COMPARATOR(2),
1729 DWT_COMPARATOR(3),
1730 #undef DWT_COMPARATOR
1733 static const struct reg_arch_type dwt_reg_type = {
1734 .get = cortex_m3_dwt_get_reg,
1735 .set = cortex_m3_dwt_set_reg,
1738 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1740 struct dwt_reg_state *state;
1742 state = calloc(1, sizeof *state);
1743 if (!state)
1744 return;
1745 state->addr = d->addr;
1746 state->target = t;
1748 r->name = d->name;
1749 r->size = d->size;
1750 r->value = &state->value;
1751 r->arch_info = state;
1752 r->type = &dwt_reg_type;
1755 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1757 uint32_t dwtcr;
1758 struct reg_cache *cache;
1759 struct cortex_m3_dwt_comparator *comparator;
1760 int reg, i;
1762 target_read_u32(target, DWT_CTRL, &dwtcr);
1763 if (!dwtcr) {
1764 LOG_DEBUG("no DWT");
1765 return;
1768 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1769 cm3->dwt_comp_available = cm3->dwt_num_comp;
1770 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1771 sizeof(struct cortex_m3_dwt_comparator));
1772 if (!cm3->dwt_comparator_list) {
1773 fail0:
1774 cm3->dwt_num_comp = 0;
1775 LOG_ERROR("out of mem");
1776 return;
1779 cache = calloc(1, sizeof *cache);
1780 if (!cache) {
1781 fail1:
1782 free(cm3->dwt_comparator_list);
1783 goto fail0;
1785 cache->name = "cortex-m3 dwt registers";
1786 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1787 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1788 if (!cache->reg_list) {
1789 free(cache);
1790 goto fail1;
1793 for (reg = 0; reg < 2; reg++)
1794 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1795 dwt_base_regs + reg);
1797 comparator = cm3->dwt_comparator_list;
1798 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1799 int j;
1801 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1802 for (j = 0; j < 3; j++, reg++)
1803 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1804 dwt_comp + 3 * i + j);
1807 *register_get_last_cache_p(&target->reg_cache) = cache;
1808 cm3->dwt_cache = cache;
1810 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1811 dwtcr, cm3->dwt_num_comp,
1812 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1814 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1815 * implement single-address data value watchpoints ... so we
1816 * won't need to check it later, when asked to set one up.
1820 #define MVFR0 0xe000ef40
1821 #define MVFR1 0xe000ef44
1823 #define MVFR0_DEFAULT_M4 0x10110021
1824 #define MVFR1_DEFAULT_M4 0x11000011
1826 int cortex_m3_examine(struct target *target)
1828 int retval;
1829 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1830 int i;
1831 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1832 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1833 struct armv7m_common *armv7m = target_to_armv7m(target);
1835 /* stlink shares the examine handler but does not support
1836 * all its calls */
1837 if (!armv7m->stlink) {
1838 retval = ahbap_debugport_init(swjdp);
1839 if (retval != ERROR_OK)
1840 return retval;
1843 if (!target_was_examined(target)) {
1844 target_set_examined(target);
1846 /* Read from Device Identification Registers */
1847 retval = target_read_u32(target, CPUID, &cpuid);
1848 if (retval != ERROR_OK)
1849 return retval;
1851 /* Get CPU Type */
1852 i = (cpuid >> 4) & 0xf;
1854 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1855 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1856 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* test for floating point feature on cortex-m4 */
		if (i == 4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
				armv7m->fp_feature = FPv4_SP;
			}
		} else if (i == 0) {
			/* Cortex-M0 does not support unaligned memory access */
			armv7m->arm.is_armv6m = true;
		}

		if (i == 4 || i == 3) {
			/* Cortex-M3/M4 has 4096 bytes autoincrement range */
			armv7m->dap.tar_autoincr_block = (1 << 12);
		}

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		cortex_m3->auto_bp_type = 1;
		/* NUM_CODE is split across FP_CTRL bits [14:12] and [7:4] */
		cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
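		/* For instance, fpcr == 0x260 decodes to NUM_CODE = 6 instruction
		 * comparators and NUM_LIT ([11:8]) = 2 literal comparators, the
		 * common Cortex-M3 configuration. */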
		cortex_m3->fp_code_available = cortex_m3->fp_num_code;
		cortex_m3->fp_comparator_list = calloc(
				cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
				sizeof(struct cortex_m3_fp_comparator));
		cortex_m3->fpb_enabled = fpcr & 1;
		for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
			cortex_m3->fp_comparator_list[i].type =
				(i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
		}
		LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m3->fp_num_code,
			cortex_m3->fp_num_lit);

		/* Setup DWT */
		cortex_m3_dwt_setup(cortex_m3, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target),
			cortex_m3->fp_num_code,
			cortex_m3->dwt_num_comp);
	}

	return ERROR_OK;
}

static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
	uint16_t dcrdr;
	int retval;

	retval = mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	/* The target-side DCC convention packs the handshake flags into the low
	 * byte of DCRDR and the payload byte into bits [15:8]. */
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to the software dcc register
	 * to signify we have read the data */
	if (dcrdr & (1 << 0)) {
		dcrdr = 0;
		retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
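
/* For reference, a minimal sketch of the matching target-side writer (the
 * real helpers live in contrib/libdcc, not in this file).  It assumes the
 * DCRDR half-word is memory-mapped at 0xE000EDF8 and that bit 0 is the
 * "byte pending" flag which cortex_m3_dcc_read() clears above; the helper
 * name is hypothetical:
 *
 *	#define DCRDR_HALF (*(volatile uint16_t *) 0xE000EDF8)
 *
 *	static void dcc_send_byte(uint8_t b)
 *	{
 *		while (DCRDR_HALF & 1)
 *			;	// wait for the debugger to drain the previous byte
 *		DCRDR_HALF = (uint16_t)((b << 8) | 1);
 *	}
 */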

static int cortex_m3_target_request_data(struct target *target,
	uint32_t size, uint8_t *buffer)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint8_t data;
	uint8_t ctrl;
	uint32_t i;

	/* 'size' counts 32-bit words; each word arrives as four DCC bytes */
	for (i = 0; i < (size * 4); i++) {
		cortex_m3_dcc_read(swjdp, &data, &ctrl);
		buffer[i] = data;
	}

	return ERROR_OK;
}

static int cortex_m3_handle_target_request(void *priv)
{
	struct target *target = priv;
	if (!target_was_examined(target))
		return ERROR_OK;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;

	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint8_t data;
		uint8_t ctrl;

		cortex_m3_dcc_read(swjdp, &data, &ctrl);

		/* check if we have data */
		if (ctrl & (1 << 0)) {
			uint32_t request;

			/* we assume target is quick enough;
			 * reassemble the request word LSB first */
			request = data;
			cortex_m3_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 8);
			cortex_m3_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 16);
			cortex_m3_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 24);
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
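
/* Debug message reception is driven by the generic target_request layer; it
 * is normally switched on from the command line with something like
 *
 *   target_request debugmsgs enable
 *
 * (see target_request.c for the exact options). */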

static int cortex_m3_init_arch_info(struct target *target,
	struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
{
	int retval;
	struct armv7m_common *armv7m = &cortex_m3->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* prepare JTAG information for the new target */
	cortex_m3->jtag_info.tap = tap;
	cortex_m3->jtag_info.scann_size = 4;

	/* default reset mode is to use srst if fitted;
	 * if not, CORTEX_M3_RESET_VECTRESET is used */
	cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;

	armv7m->arm.dap = &armv7m->dap;

	/* Leave (only) generic DAP stuff for debugport_init() */
	armv7m->dap.jtag_info = &cortex_m3->jtag_info;
	armv7m->dap.memaccess_tck = 8;

	/* Cortex-M3/M4 has a 4096 byte autoincrement range,
	 * but set a safe default of 1024 to support Cortex-M0;
	 * this is raised in cortex_m3_examine() if an M3/M4 is detected */
	armv7m->dap.tar_autoincr_block = (1 << 10);

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
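
	/* Poll the DCC channel for pending debug/trace requests every 1 ms
	 * (time_ms = 1, periodic = 1) via the handler registered below. */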
	target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);

	retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
{
	struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));

	cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
	cortex_m3_init_arch_info(target, cortex_m3, target->tap);

	return ERROR_OK;
}

/*--------------------------------------------------------------------------*/

static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
	struct cortex_m3_common *cm3)
{
	if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
		command_print(cmd_ctx, "target is not a Cortex-M3");
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}

/*
 * Only stuff below this line should need to verify that its target
 * is a Cortex-M3. Everything else should have indirected through the
 * cortexm3_target structure, which is only used with CM3 targets.
 */

static const struct {
	char name[10];
	unsigned mask;
} vec_ids[] = {
	{ "hard_err", VC_HARDERR, },
	{ "int_err", VC_INTERR, },
	{ "bus_err", VC_BUSERR, },
	{ "state_err", VC_STATERR, },
	{ "chk_err", VC_CHKERR, },
	{ "nocp_err", VC_NOCPERR, },
	{ "mm_err", VC_MMERR, },
	{ "reset", VC_CORERESET, },
};
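
/* Each VC_* mask is a vector catch enable bit in DCB_DEMCR: VC_CORERESET is
 * bit 0 and VC_MMERR..VC_HARDERR occupy bits [10:4], so the whole set fits
 * in the low half-word that the handler below clears and rewrites. */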

COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr = 0;
	int retval;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* Update only the low half-word, where the vector catch enables live. */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, then read back rather than assuming the write stuck */
		retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
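
/* Typical usage from a telnet session or an event handler, using the names
 * in vec_ids[] above:
 *
 *   cortex_m3 vector_catch hard_err bus_err reset
 *   cortex_m3 vector_catch none
 */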

COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	int retval;

	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M3_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m3->isrmasking_mode = n->value;

		if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
			cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
		else
			cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
	}

	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
	command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);

	return ERROR_OK;
}
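
/* Typical usage while the target is halted, for example:
 *
 *   cortex_m3 maskisr on      (keep C_MASKINTS set so interrupts stay masked)
 *   cortex_m3 maskisr auto    (let the step/resume code decide when to mask)
 */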

COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	int retval;
	char *reset_config;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
			cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
		else if (strcmp(*CMD_ARGV, "vectreset") == 0)
			cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
	}

	switch (cortex_m3->soft_reset_config) {
		case CORTEX_M3_RESET_SYSRESETREQ:
			reset_config = "sysresetreq";
			break;

		case CORTEX_M3_RESET_VECTRESET:
			reset_config = "vectreset";
			break;

		default:
			reset_config = "unknown";
			break;
	}

	command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);

	return ERROR_OK;
}
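
/* Typical usage, e.g. on cores where VECTRESET is not implemented:
 *
 *   cortex_m3 reset_config sysresetreq
 */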

static const struct command_registration cortex_m3_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m3_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m3 interrupts",
		.usage = "['auto'|'on'|'off']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m3_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m3_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['srst'|'sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration cortex_m3_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.name = "cortex_m3",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M3 command group",
		.usage = "",
		.chain = cortex_m3_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

struct target_type cortexm3_target = {
	.name = "cortex_m3",

	.poll = cortex_m3_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m3_target_request_data,

	.halt = cortex_m3_halt,
	.resume = cortex_m3_resume,
	.step = cortex_m3_step,

	.assert_reset = cortex_m3_assert_reset,
	.deassert_reset = cortex_m3_deassert_reset,
	.soft_reset_halt = cortex_m3_soft_reset_halt,

	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m3_read_memory,
	.write_memory = cortex_m3_write_memory,
	.bulk_write_memory = cortex_m3_bulk_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m3_add_breakpoint,
	.remove_breakpoint = cortex_m3_remove_breakpoint,
	.add_watchpoint = cortex_m3_add_watchpoint,
	.remove_watchpoint = cortex_m3_remove_watchpoint,

	.commands = cortex_m3_command_handlers,
	.target_create = cortex_m3_target_create,
	.init_target = cortex_m3_init_target,
	.examine = cortex_m3_examine,