ARM DPM: disable some nyet-ready breakpoint code
[openocd/dnglaze.git] / src / target / arm_dpm.c
blob 4bd22ffbf53cc7fa3807a8387d9728d60c015df2
/*
 * Copyright (C) 2009 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "arm_dpm.h"
#include <jtag/jtag.h>
#include "register.h"
#include "breakpoints.h"
#include "target_type.h"
#include "arm_opcodes.h"

/**
 * @file
 * Implements various ARM DPM operations using architectural debug registers.
 * These routines layer over core-specific communication methods to cope with
 * implementation differences between cores like ARM1136 and Cortex-A8.
 */

/*----------------------------------------------------------------------*/

/*
 * Coprocessor support
 */

/* Read coprocessor */
static int dpm_mrc(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
		uint32_t *value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
			(int) op1, (int) CRn,
			(int) CRm, (int) op2);

	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
			value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int dpm_mcr(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
		uint32_t value)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
			(int) op1, (int) CRn,
			(int) CRm, (int) op2);

	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
			value);

	/* (void) */ dpm->finish(dpm);
	return retval;
}
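
/* Usage sketch (illustrative, not part of this file): once arm_dpm_setup()
 * has installed these hooks, CP15 registers can be read through the DCC
 * path, e.g. the MIDR via "MRC p15, 0, r0, c0, c0, 0":
 *
 *	uint32_t midr;
 *	int retval = arm->mrc(target, 15, 0, 0, 0, 0, &midr);
 *
 * The variable names here are hypothetical.
 */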

/*----------------------------------------------------------------------*/

/*
 * Register access utilities
 */

/* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
 * Routines *must* restore the original mode before returning!!
 */
static int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
{
	int retval;
	uint32_t cpsr;

	/* restore previous mode */
	if (mode == ARM_MODE_ANY)
		cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);

	/* else force to the specified mode */
	else
		cpsr = mode;
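
	/* ARMV4_5_MSR_GP(0, 0xf, 0) should encode "MSR CPSR_<fields>, r0" with
	 * all four field-mask bits (c, x, s, f) set and the R bit clear, so
	 * the value passed through r0 replaces the entire CPSR, including the
	 * mode bits.
	 */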
	retval = dpm->instr_write_data_r0(dpm, ARMV4_5_MSR_GP(0, 0xf, 0), cpsr);

	if (dpm->instr_cpsr_sync)
		retval = dpm->instr_cpsr_sync(dpm);

	return retval;
}

/* just read the register -- rely on the core mode being right */
static int dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	uint32_t value;
	int retval;

	switch (regnum) {
	case 0 ... 14:
		/* return via DCC:  "MCR p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_read_data_dcc(dpm,
				ARMV4_5_MCR(14, 0, regnum, 0, 5, 0),
				&value);
		break;
	case 15:	/* PC */
		/* "MOV r0, pc"; then return via DCC */
		retval = dpm->instr_read_data_r0(dpm, 0xe1a0000f, &value);
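
		/* The PC reads back with the usual pipeline offset (+8 in
		 * ARM state, +4 in Thumb); the adjustment below converts it
		 * to the address at which execution stopped.
		 */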
		/* NOTE: this seems like a slightly awkward place to update
		 * this value ... but if the PC gets written (the only way
		 * to change what we compute), the arch spec says subsequent
		 * reads return values which are "unpredictable".  So this
		 * is always right except in those broken-by-intent cases.
		 */
		switch (dpm->arm->core_state) {
		case ARM_STATE_ARM:
			value -= 8;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			value -= 4;
			break;
		case ARM_STATE_JAZELLE:
			/* core-specific ... ? */
			LOG_WARNING("Jazelle PC adjustment unknown");
			break;
		}
		break;
	default:
		/* 16: "MRS r0, CPSR"; then return via DCC
		 * 17: "MRS r0, SPSR"; then return via DCC
		 */
		retval = dpm->instr_read_data_r0(dpm,
				ARMV4_5_MRS(0, regnum & 1),
				&value);
		break;
	}

	if (retval == ERROR_OK) {
		buf_set_u32(r->value, 0, 32, value);
		r->valid = true;
		r->dirty = false;
		LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}
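
/* Register numbering used by dpm_read_reg() above and dpm_write_reg() below:
 * 0..14 select the core registers of the current mode, 15 is the PC, 16
 * means CPSR, and 17 means the current mode's SPSR.
 */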

/* just write the register -- rely on the core mode being right */
static int dpm_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
{
	int retval;
	uint32_t value = buf_get_u32(r->value, 0, 32);

	switch (regnum) {
	case 0 ... 14:
		/* load register from DCC:  "MRC p14, 0, Rnum, c0, c5, 0" */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, regnum, 0, 5, 0),
				value);
		break;
	case 15:	/* PC */
		/* read r0 from DCC; then "MOV pc, r0" */
		retval = dpm->instr_write_data_r0(dpm, 0xe1a0f000, value);
		break;
	default:
		/* 16: read r0 from DCC, then "MSR CPSR_cxsf, r0"
		 * 17: read r0 from DCC, then "MSR SPSR_cxsf, r0"
		 */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MSR_GP(0, 0xf, regnum & 1),
				value);

		if (regnum == 16 && dpm->instr_cpsr_sync)
			retval = dpm->instr_cpsr_sync(dpm);

		break;
	}

	if (retval == ERROR_OK) {
		r->dirty = false;
		LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned) value);
	}

	return retval;
}

/**
 * Read basic registers of the current context:  R0 to R15, and CPSR;
 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
 * In normal operation this is called on entry to halting debug state,
 * possibly after some other operations supporting restore of debug state
 * or making sure the CPU is fully idle (drain write buffer, etc).
 */
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	uint32_t cpsr;
	int retval;
	struct reg *r;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* read R0 first (it's used for scratch), then CPSR */
	r = arm->core_cache->reg_list + 0;
	if (!r->valid) {
		retval = dpm_read_reg(dpm, r, 0);
		if (retval != ERROR_OK)
			goto fail;
	}
	r->dirty = true;

	retval = dpm->instr_read_data_r0(dpm, ARMV4_5_MRS(0, 0), &cpsr);
	if (retval != ERROR_OK)
		goto fail;

	/* update core mode and state, plus shadow mapping for R8..R14 */
	arm_set_cpsr(arm, cpsr);

	/* REVISIT we can probably avoid reading R1..R14, saving time... */
	for (unsigned i = 1; i < 16; i++) {
		r = arm_reg_current(arm, i);
		if (r->valid)
			continue;

		retval = dpm_read_reg(dpm, r, i);
		if (retval != ERROR_OK)
			goto fail;
	}

	/* NOTE: SPSR ignored (if it's even relevant). */

	/* REVISIT the debugger can trigger various exceptions.  See the
	 * ARMv7A architecture spec, section C5.7, for more info about
	 * what defenses are needed; v6 debug has the most issues.
	 */

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}

/* Avoid needless I/O ... leave breakpoints and watchpoints alone
 * unless they're removed, or need updating because of single-stepping
 * or running debugger code.
 */
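/* Meaning of the parameters, as used by the callers below: xp->dirty means
 * the hardware unit no longer matches the desired state; set_p is NULL when
 * the slot has no owning breakpoint/watchpoint (it must simply be disabled),
 * otherwise *set_p tracks whether the unit is currently programmed; and
 * "bpwp" says whether this transition wants breakpoints/watchpoints active.
 */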
static int dpm_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
		struct dpm_bpwp *xp, int *set_p)
{
	int retval = ERROR_OK;
	bool disable;

	if (!set_p) {
		if (!xp->dirty)
			goto done;
		xp->dirty = false;
		/* removed or startup; we must disable it */
		disable = true;
	} else if (bpwp) {
		if (!xp->dirty)
			goto done;
		/* disabled, but we must set it */
		xp->dirty = disable = false;
		*set_p = true;
	} else {
		if (!*set_p)
			goto done;
		/* set, but we must temporarily disable it */
		xp->dirty = disable = true;
		*set_p = false;
	}

	if (disable)
		retval = dpm->bpwp_disable(dpm, xp->number);
	else
		retval = dpm->bpwp_enable(dpm, xp->number,
				xp->address, xp->control);

	if (retval != ERROR_OK)
		LOG_ERROR("%s: can't %s HW bp/wp %d",
				target_name(dpm->arm->target),
				disable ? "disable" : "enable",
				xp->number);
done:
	return retval;
}

/**
 * Writes all modified core registers for all processor modes.  In normal
 * operation this is called on exit from halting debug state.
 *
 * @param dpm: represents the processor
 * @param bpwp: true ensures breakpoints and watchpoints are set,
 *	false ensures they are cleared
 */
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
{
	struct arm *arm = dpm->arm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_write;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* If we're managing hardware breakpoints for this core, enable
	 * or disable them as requested.
	 *
	 * REVISIT We don't yet manage them for ANY cores.  Eventually
	 * we should be able to assume we handle them; but until then,
	 * cope with the hand-crafted breakpoint code.
	 */
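	/* (Per the commit summary, the "if (0)" below is what disables the
	 * not-yet-ready breakpoint management; only the watchpoint loop that
	 * follows it actually runs.)
	 */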
	if (0) {
		for (unsigned i = 0; i < dpm->nbp; i++) {
			struct dpm_bp *dbp = dpm->dbp + i;
			struct breakpoint *bp = dbp->bp;

			retval = dpm_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
					bp ? &bp->set : NULL);
		}
	}

	/* enable/disable watchpoints */
	for (unsigned i = 0; i < dpm->nwp; i++) {
		struct dpm_wp *dwp = dpm->dwp + i;
		struct watchpoint *wp = dwp->wp;

		retval = dpm_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
				wp ? &wp->set : NULL);
	}

	/* NOTE: writes to breakpoint and watchpoint registers might
	 * be queued, and need (efficient/batched) flushing later.
	 */

	/* Scan the registers until we find one that's both dirty and
	 * eligible for flushing.  Flush that and everything else that
	 * shares the same core mode setting.  Typically this won't
	 * actually find anything to do...
	 */
	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_write = false;

		/* check everything except our scratch register R0 */
		for (unsigned i = 1; i < cache->num_regs; i++) {
			struct arm_reg *r;
			unsigned regnum;

			/* also skip PC, CPSR, and non-dirty */
			if (i == 15)
				continue;
			if (arm->cpsr == cache->reg_list + i)
				continue;
			if (!cache->reg_list[i].dirty)
				continue;

			r = cache->reg_list[i].arch_info;
			regnum = r->num;

			/* may need to pick and set a mode */
			if (!did_write) {
				enum arm_mode tmode;

				did_write = true;
				mode = tmode = r->mode;

				/* cope with special cases */
				switch (regnum) {
				case 8 ... 12:
					/* r8..r12 "anything but FIQ" case;
					 * we "know" core mode is accurate
					 * since we haven't changed it yet
					 */
					if (arm->core_mode == ARM_MODE_FIQ
							&& ARM_MODE_ANY
								!= mode)
						tmode = ARM_MODE_USR;
					break;
				case 16:
					/* SPSR */
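					/* (A cached register numbered 16 here
					 * cannot be the CPSR -- that was
					 * skipped above -- so it is a banked
					 * SPSR; dpm_write_reg() takes 17 for
					 * SPSR.)
					 */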
					regnum++;
					break;
				}

				/* REVISIT error checks */
				if (tmode != ARM_MODE_ANY)
					retval = dpm_modeswitch(dpm, tmode);
			}
			if (r->mode != mode)
				continue;

			retval = dpm_write_reg(dpm,
					&cache->reg_list[i],
					regnum);

		}
	} while (did_write);

	/* Restore original CPSR ... assuming either that we changed it,
	 * or it's dirty.  Must write PC to ensure the return address is
	 * defined, and must not write it before CPSR.
	 */
	retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
	arm->cpsr->dirty = false;

	retval = dpm_write_reg(dpm, &cache->reg_list[15], 15);
	cache->reg_list[15].dirty = false;

	/* flush R0 -- it's *very* dirty by now */
	retval = dpm_write_reg(dpm, &cache->reg_list[0], 0);
	cache->reg_list[0].dirty = false;

	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}

/* Returns ARM_MODE_ANY or temporary mode to use while reading the
 * specified register ... works around flakiness from ARM core calls.
 * Caller already filtered out SPSR access; mode is never MODE_SYS
 * or MODE_ANY.
 */
static enum arm_mode dpm_mapmode(struct arm *arm,
		unsigned num, enum arm_mode mode)
{
	enum arm_mode amode = arm->core_mode;

	/* don't switch if the mode is already correct */
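	/* (System mode has no banked registers of its own -- it shares the
	 * User mode bank -- so for this comparison treat SYS as USR.)
	 */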
	if (amode == ARM_MODE_SYS)
		amode = ARM_MODE_USR;
	if (mode == amode)
		return ARM_MODE_ANY;

	switch (num) {
	/* don't switch for non-shadowed registers (r0..r7, r15/pc, cpsr) */
	case 0 ... 7:
	case 15:
	case 16:
		break;
	/* r8..r12 aren't shadowed for anything except FIQ */
	case 8 ... 12:
		if (mode == ARM_MODE_FIQ)
			return mode;
		break;
	/* r13/sp, and r14/lr are always shadowed */
	case 13:
	case 14:
		return mode;
	default:
		LOG_WARNING("invalid register #%u", num);
		break;
	}

	return ARM_MODE_ANY;
}

/*
 * Standard ARM register accessors ... there are three methods
 * in "struct arm", to support individual read/write and bulk read
 * of registers.
 */

static int arm_dpm_read_core_reg(struct target *target, struct reg *r,
		int regnum, enum arm_mode mode)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || regnum > 16)
		return ERROR_INVALID_ARGUMENTS;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to read SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_read_reg(dpm, r, regnum);
	/* always clean up, regardless of error */

	if (mode != ARM_MODE_ANY)
		/* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int arm_dpm_write_core_reg(struct target *target, struct reg *r,
		int regnum, enum arm_mode mode, uint32_t value)
{
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int retval;

	if (regnum < 0 || regnum > 16)
		return ERROR_INVALID_ARGUMENTS;

	if (regnum == 16) {
		if (mode != ARM_MODE_ANY)
			regnum = 17;
	} else
		mode = dpm_mapmode(dpm->arm, regnum, mode);

	/* REVISIT what happens if we try to write SPSR in a core mode
	 * which has no such register?
	 */

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	if (mode != ARM_MODE_ANY) {
		retval = dpm_modeswitch(dpm, mode);
		if (retval != ERROR_OK)
			goto fail;
	}

	retval = dpm_write_reg(dpm, r, regnum);
	/* always clean up, regardless of error */

	if (mode != ARM_MODE_ANY)
		/* (void) */ dpm_modeswitch(dpm, ARM_MODE_ANY);

fail:
	/* (void) */ dpm->finish(dpm);
	return retval;
}

static int arm_dpm_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	struct reg_cache *cache = arm->core_cache;
	int retval;
	bool did_read;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	do {
		enum arm_mode mode = ARM_MODE_ANY;

		did_read = false;

		/* We "know" arm_dpm_read_current_registers() was called so
		 * the unmapped registers (R0..R7, PC, AND CPSR) and some
		 * view of R8..R14 are current.  We also "know" oddities of
		 * register mapping: special cases for R8..R12 and SPSR.
		 *
		 * Pick some mode with unread registers and read them all.
		 * Repeat until done.
		 */
		for (unsigned i = 0; i < cache->num_regs; i++) {
			struct arm_reg *r;

			if (cache->reg_list[i].valid)
				continue;
			r = cache->reg_list[i].arch_info;

			/* may need to pick a mode and set CPSR */
			if (!did_read) {
				did_read = true;
				mode = r->mode;

				/* For R8..R12 when we've entered debug
				 * state in FIQ mode... patch mode.
				 */
				if (mode == ARM_MODE_ANY)
					mode = ARM_MODE_USR;

				/* REVISIT error checks */
				retval = dpm_modeswitch(dpm, mode);
			}
			if (r->mode != mode)
				continue;

			/* CPSR was read, so "R16" must mean SPSR */
			retval = dpm_read_reg(dpm,
					&cache->reg_list[i],
					(r->num == 16) ? 17 : r->num);

		}
	} while (did_read);

	retval = dpm_modeswitch(dpm, ARM_MODE_ANY);
	/* (void) */ dpm->finish(dpm);
done:
	return retval;
}

/*----------------------------------------------------------------------*/

/*
 * Breakpoint and Watchpoint support.
 *
 * Hardware {break,watch}points are usually left active, to minimize
 * debug entry/exit costs.  When they are set or cleared, it's done in
 * batches.  Also, DPM-conformant hardware can update debug registers
 * regardless of whether the CPU is running or halted ... though that
 * fact isn't currently leveraged.
 */

static int dpm_watchpoint_setup(struct arm_dpm *dpm, unsigned index,
		struct watchpoint *wp)
{
	uint32_t addr = wp->address;
	uint32_t control;

	/* this hardware doesn't support data value matching or masking */
	if (wp->value || wp->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint values and masking not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	control = (1 << 0)	/* enable */
		| (3 << 1);	/* both user and privileged access */

	switch (wp->rw) {
	case WPT_READ:
		control |= 1 << 3;
		break;
	case WPT_WRITE:
		control |= 2 << 3;
		break;
	case WPT_ACCESS:
		control |= 3 << 3;
		break;
	}

	/* Match 1, 2, or all 4 byte addresses in this word.
	 *
	 * FIXME: v7 hardware allows lengths up to 2 GB, and has eight
	 * byte address select bits.  Support larger wp->length, if addr
	 * is suitably aligned.
	 */
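	/* Bits [8:5] of the control value form a byte address select mask:
	 * each bit enables comparison of one byte within the word-aligned
	 * address programmed into the watchpoint value register below.
	 */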
	switch (wp->length) {
	case 1:
		control |= (1 << (addr & 3)) << 5;
		addr &= ~3;
		break;
	case 2:
		/* require 2-byte alignment */
		if (!(addr & 1)) {
			control |= (3 << (addr & 2)) << 5;
			break;
		}
		/* FALL THROUGH */
	case 4:
		/* require 4-byte alignment */
		if (!(addr & 3)) {
			control |= 0xf << 5;
			break;
		}
		/* FALL THROUGH */
	default:
		LOG_DEBUG("bad watchpoint length or alignment");
		return ERROR_INVALID_ARGUMENTS;
	}

	/* other control bits:
	 * bits 9:12 == 0 ... only checking up to four byte addresses (v7 only)
	 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
	 * bit 20 == 0 ... not linked to a context ID
	 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
	 */

	dpm->dwp[index].wp = wp;
	dpm->dwp[index].bpwp.address = addr & ~3;
	dpm->dwp[index].bpwp.control = control;
	dpm->dwp[index].bpwp.dirty = true;

	/* hardware is updated in write_dirty_registers() */
	return ERROR_OK;
}

static int dpm_add_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	if (dpm->bpwp_enable) {
		for (unsigned i = 0; i < dpm->nwp; i++) {
			if (!dpm->dwp[i].wp) {
				retval = dpm_watchpoint_setup(dpm, i, wp);
				break;
			}
		}
	}

	return retval;
}

static int dpm_remove_watchpoint(struct target *target, struct watchpoint *wp)
{
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_INVALID_ARGUMENTS;

	for (unsigned i = 0; i < dpm->nwp; i++) {
		if (dpm->dwp[i].wp == wp) {
			dpm->dwp[i].wp = NULL;
			dpm->dwp[i].bpwp.dirty = true;

			/* hardware is updated in write_dirty_registers() */
			retval = ERROR_OK;
			break;
		}
	}

	return retval;
}

void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
{
	switch (dpm->arm->core_state) {
	case ARM_STATE_ARM:
		addr -= 8;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		addr -= 4;
		break;
	case ARM_STATE_JAZELLE:
		/* ?? */
		break;
	}
	dpm->wp_pc = addr;
}

/*----------------------------------------------------------------------*/

/*
 * Other debug and support utilities
 */

void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
{
	struct target *target = dpm->arm->target;

	dpm->dscr = dscr;

	/* Examine debug reason */
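	/* DSCR_ENTRY() extracts the "method of debug entry" field from the
	 * Debug Status and Control Register; the case values below follow
	 * that field's architectural encoding.
	 */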
	switch (DSCR_ENTRY(dscr)) {
	case 6:		/* Data abort (v6 only) */
	case 7:		/* Prefetch abort (v6 only) */
		/* FALL THROUGH -- assume a v6 core in abort mode */
	case 0:		/* HALT request from debugger */
	case 4:		/* EDBGRQ */
		target->debug_reason = DBG_REASON_DBGRQ;
		break;
	case 1:		/* HW breakpoint */
	case 3:		/* SW BKPT */
	case 5:		/* vector catch */
		target->debug_reason = DBG_REASON_BREAKPOINT;
		break;
	case 2:		/* asynch watchpoint */
	case 10:	/* precise watchpoint */
		target->debug_reason = DBG_REASON_WATCHPOINT;
		break;
	default:
		target->debug_reason = DBG_REASON_UNDEFINED;
		break;
	}
}

/*----------------------------------------------------------------------*/

/*
 * Setup and management support.
 */

/**
 * Hooks up this DPM to its associated target; call only once.
 * Initially this only covers the register cache.
 *
 * Oh, and watchpoints.  Yeah.
 */
int arm_dpm_setup(struct arm_dpm *dpm)
{
	struct arm *arm = dpm->arm;
	struct target *target = arm->target;
	struct reg_cache *cache;

	arm->dpm = dpm;

	/* register access setup */
	arm->full_context = arm_dpm_full_context;
	arm->read_core_reg = arm_dpm_read_core_reg;
	arm->write_core_reg = arm_dpm_write_core_reg;

	cache = arm_build_reg_cache(target, arm);
	if (!cache)
		return ERROR_FAIL;

	*register_get_last_cache_p(&target->reg_cache) = cache;

	/* coprocessor access setup */
	arm->mrc = dpm_mrc;
	arm->mcr = dpm_mcr;

	/* breakpoint and watchpoint setup */
	target->type->add_watchpoint = dpm_add_watchpoint;
	target->type->remove_watchpoint = dpm_remove_watchpoint;

	/* FIXME add breakpoint support */
	/* FIXME add vector catch support */
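
	/* The Debug ID Register (DIDR) advertises the debug resources:
	 * bits [27:24] hold the number of breakpoint units minus one, and
	 * bits [31:28] the number of watchpoint units minus one.
	 */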
	dpm->nbp = 1 + ((dpm->didr >> 24) & 0xf);
	dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);

	dpm->nwp = 1 + ((dpm->didr >> 28) & 0xf);
	dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);

	if (!dpm->dbp || !dpm->dwp) {
		free(dpm->dbp);
		free(dpm->dwp);
		return ERROR_FAIL;
	}

	LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target), dpm->nbp, dpm->nwp);

	/* REVISIT ... and some of those breakpoints could match
	 * execution context IDs...
	 */

	return ERROR_OK;
}

/**
 * Reinitializes DPM state at the beginning of a new debug session
 * or after a reset which may have affected the debug module.
 */
int arm_dpm_initialize(struct arm_dpm *dpm)
{
	/* Disable all breakpoints and watchpoints at startup. */
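	/* Note that bpwp_enable()/bpwp_disable() use one shared index space:
	 * breakpoint units are numbered from 0 and watchpoint units from 16,
	 * matching the .number assignments below.
	 */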
	if (dpm->bpwp_disable) {
		unsigned i;

		for (i = 0; i < dpm->nbp; i++) {
			dpm->dbp[i].bpwp.number = i;
			(void) dpm->bpwp_disable(dpm, i);
		}
		for (i = 0; i < dpm->nwp; i++) {
			dpm->dwp[i].bpwp.number = 16 + i;
			(void) dpm->bpwp_disable(dpm, 16 + i);
		}
	} else
		LOG_WARNING("%s: can't disable breakpoints and watchpoints",
				target_name(dpm->arm->target));

	return ERROR_OK;
}