aarch64: simplify mode and state handling
[openocd.git] / src / target / armv8_dpm.c
blob: c79b1a0ff73f4d588250392debba8208a1acff15
1 /*
2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
16 #ifdef HAVE_CONFIG_H
17 #include "config.h"
18 #endif
20 #include "arm.h"
21 #include "armv8.h"
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
24 #include "register.h"
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
29 #include "helper/time_support.h"
31 /* T32 ITR format */
32 #define T32_FMTITR(instr) (((instr & 0x0000FFFF) << 16) | ((instr & 0xFFFF0000) >> 16))
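/*
 * Worked example (illustrative, derived from the macro above):
 * T32_FMTITR(0xAAAABBBB) evaluates to 0xBBBBAAAA, i.e. the two halfwords of
 * a 32-bit T32 opcode are swapped before the opcode is written to the ITR;
 * a 16-bit T32 opcode passed in the low halfword ends up in the upper one.
 */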
34 /**
35 * @file
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38 * implementation differences between cores like ARM1136 and Cortex-A8.
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
47 /**
48 * Get the core state from EDSCR, without the need to retrieve CPSR
50 enum arm_state armv8_dpm_get_core_state(struct arm_dpm *dpm)
52 int el = (dpm->dscr >> 8) & 0x3;
53 int rw = (dpm->dscr >> 10) & 0xF;
55 dpm->last_el = el;
57 /* In Debug state, each RW bit gives the current Execution state of the corresponding EL */
58 if ((rw >> el) & 0b1)
59 return ARM_STATE_AARCH64;
61 return ARM_STATE_ARM;
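/*
 * Worked example (illustrative only): with EDSCR = 0x3d00 the EL field
 * (bits [9:8]) is 1 and the RW field (bits [13:10]) is 0b1111, so
 * (rw >> el) & 1 == 1 and ARM_STATE_AARCH64 is returned.  With
 * EDSCR = 0x0500 (EL = 1, RW = 0b0001) the same test yields 0 and the EL1
 * code is reported as AArch32 (ARM_STATE_ARM).
 */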
64 /*----------------------------------------------------------------------*/
66 static int dpmv8_write_dcc(struct armv8_common *armv8, uint32_t data)
68 return mem_ap_write_u32(armv8->debug_ap,
69 armv8->debug_base + CPUV8_DBG_DTRRX, data);
72 static int dpmv8_write_dcc_64(struct armv8_common *armv8, uint64_t data)
74 int ret;
75 ret = mem_ap_write_u32(armv8->debug_ap,
76 armv8->debug_base + CPUV8_DBG_DTRRX, data);
77 if (ret == ERROR_OK)
78 ret = mem_ap_write_u32(armv8->debug_ap,
79 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
80 return ret;
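/*
 * Illustrative note on the transfer layout implemented above: a 64-bit DCC
 * write places data[31:0] in DTRRX and data[63:32] in DTRTX; the core then
 * picks up both halves with the single MRS of DBGDTR_EL0 issued by
 * dpmv8_instr_write_data_r0_64() below.  dpmv8_read_dcc_64() performs the
 * mirror-image sequence for reads.
 */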
83 static int dpmv8_read_dcc(struct armv8_common *armv8, uint32_t *data,
84 uint32_t *dscr_p)
86 uint32_t dscr = DSCR_ITE;
87 int retval;
89 if (dscr_p)
90 dscr = *dscr_p;
92 /* Wait for DTRTXfull */
93 long long then = timeval_ms();
94 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
95 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
96 armv8->debug_base + CPUV8_DBG_DSCR,
97 &dscr);
98 if (retval != ERROR_OK)
99 return retval;
100 if (timeval_ms() > then + 1000) {
101 LOG_ERROR("Timeout waiting for DCC read");
102 return ERROR_FAIL;
106 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
107 armv8->debug_base + CPUV8_DBG_DTRTX,
108 data);
109 if (retval != ERROR_OK)
110 return retval;
112 if (dscr_p)
113 *dscr_p = dscr;
115 return retval;
118 static int dpmv8_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
119 uint32_t *dscr_p)
121 uint32_t dscr = DSCR_ITE;
122 uint32_t higher;
123 int retval;
125 if (dscr_p)
126 dscr = *dscr_p;
128 /* Wait for DTRTXfull */
129 long long then = timeval_ms();
130 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
131 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
132 armv8->debug_base + CPUV8_DBG_DSCR,
133 &dscr);
134 if (retval != ERROR_OK)
135 return retval;
136 if (timeval_ms() > then + 1000) {
137 LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32, dscr);
138 return ERROR_FAIL;
142 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
143 armv8->debug_base + CPUV8_DBG_DTRTX,
144 (uint32_t *)data);
145 if (retval != ERROR_OK)
146 return retval;
148 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
149 armv8->debug_base + CPUV8_DBG_DTRRX,
150 &higher);
151 if (retval != ERROR_OK)
152 return retval;
154 *data = *(uint32_t *)data | (uint64_t)higher << 32;
156 if (dscr_p)
157 *dscr_p = dscr;
159 return retval;
162 static int dpmv8_dpm_prepare(struct arm_dpm *dpm)
164 struct armv8_common *armv8 = dpm->arm->arch_info;
165 uint32_t dscr;
166 int retval;
168 /* set up invariant: ITE is set after every DPM operation */
169 long long then = timeval_ms();
170 for (;; ) {
171 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172 armv8->debug_base + CPUV8_DBG_DSCR,
173 &dscr);
174 if (retval != ERROR_OK)
175 return retval;
176 if ((dscr & DSCR_ITE) != 0)
177 break;
178 if (timeval_ms() > then + 1000) {
179 LOG_ERROR("Timeout waiting for dpm prepare");
180 return ERROR_FAIL;
184 /* update the stored copy of dscr */
185 dpm->dscr = dscr;
187 /* this "should never happen" ... */
188 if (dscr & DSCR_DTR_RX_FULL) {
189 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
190 /* Clear DCCRX */
191 retval = mem_ap_read_u32(armv8->debug_ap,
192 armv8->debug_base + CPUV8_DBG_DTRRX, &dscr);
193 if (retval != ERROR_OK)
194 return retval;
197 return retval;
200 static int dpmv8_dpm_finish(struct arm_dpm *dpm)
202 /* REVISIT what could be done here? */
203 return ERROR_OK;
206 static int dpmv8_exec_opcode(struct arm_dpm *dpm,
207 uint32_t opcode, uint32_t *p_dscr)
209 struct armv8_common *armv8 = dpm->arm->arch_info;
210 uint32_t dscr = dpm->dscr;
211 int retval;
213 if (p_dscr)
214 dscr = *p_dscr;
216 /* Wait for InstrCompl bit to be set */
217 long long then = timeval_ms();
218 while ((dscr & DSCR_ITE) == 0) {
219 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
220 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
221 if (retval != ERROR_OK) {
222 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
223 return retval;
225 if (timeval_ms() > then + 1000) {
226 LOG_ERROR("Timeout waiting for dpmv8_exec_opcode");
227 return ERROR_FAIL;
231 if (armv8_dpm_get_core_state(dpm) != ARM_STATE_AARCH64)
232 opcode = T32_FMTITR(opcode);
234 retval = mem_ap_write_u32(armv8->debug_ap,
235 armv8->debug_base + CPUV8_DBG_ITR, opcode);
236 if (retval != ERROR_OK)
237 return retval;
239 then = timeval_ms();
240 do {
241 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
242 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
243 if (retval != ERROR_OK) {
244 LOG_ERROR("Could not read DSCR register");
245 return retval;
247 if (timeval_ms() > then + 1000) {
248 LOG_ERROR("Timeout waiting for dpmv8_exec_opcode");
249 return ERROR_FAIL;
251 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
253 /* update dscr and el after each command execution */
254 dpm->dscr = dscr;
255 if (dpm->last_el != ((dscr >> 8) & 3))
256 LOG_DEBUG("EL %i -> %i", dpm->last_el, (dscr >> 8) & 3);
257 dpm->last_el = (dscr >> 8) & 3;
259 if (dscr & DSCR_ERR) {
260 LOG_ERROR("Opcode 0x%08"PRIx32", DSCR.ERR=1, DSCR.EL=%i", opcode, dpm->last_el);
261 armv8_dpm_handle_exception(dpm);
262 retval = ERROR_FAIL;
265 if (p_dscr)
266 *p_dscr = dscr;
268 return retval;
271 static int dpmv8_instr_execute(struct arm_dpm *dpm, uint32_t opcode)
273 return dpmv8_exec_opcode(dpm, opcode, NULL);
276 static int dpmv8_instr_write_data_dcc(struct arm_dpm *dpm,
277 uint32_t opcode, uint32_t data)
279 struct armv8_common *armv8 = dpm->arm->arch_info;
280 int retval;
282 retval = dpmv8_write_dcc(armv8, data);
283 if (retval != ERROR_OK)
284 return retval;
286 return dpmv8_exec_opcode(dpm, opcode, NULL);
289 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm *dpm,
290 uint32_t opcode, uint64_t data)
292 struct armv8_common *armv8 = dpm->arm->arch_info;
293 int retval;
295 retval = dpmv8_write_dcc_64(armv8, data);
296 if (retval != ERROR_OK)
297 return retval;
299 return dpmv8_exec_opcode(dpm, opcode, NULL);
302 static int dpmv8_instr_write_data_r0(struct arm_dpm *dpm,
303 uint32_t opcode, uint32_t data)
305 struct armv8_common *armv8 = dpm->arm->arch_info;
306 uint32_t dscr = DSCR_ITE;
307 int retval;
309 retval = dpmv8_write_dcc(armv8, data);
310 if (retval != ERROR_OK)
311 return retval;
313 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, READ_REG_DTRRX), &dscr);
314 if (retval != ERROR_OK)
315 return retval;
317 /* then the opcode, taking data from R0 */
318 return dpmv8_exec_opcode(dpm, opcode, &dscr);
321 static int dpmv8_instr_write_data_r0_64(struct arm_dpm *dpm,
322 uint32_t opcode, uint64_t data)
324 struct armv8_common *armv8 = dpm->arm->arch_info;
325 int retval;
327 if (dpm->arm->core_state != ARM_STATE_AARCH64)
328 return dpmv8_instr_write_data_r0(dpm, opcode, data);
330 /* transfer data from DCC to R0 */
331 retval = dpmv8_write_dcc_64(armv8, data);
332 if (retval == ERROR_OK)
333 retval = dpmv8_exec_opcode(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
335 /* then the opcode, taking data from R0 */
336 if (retval == ERROR_OK)
337 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
339 return retval;
342 static int dpmv8_instr_cpsr_sync(struct arm_dpm *dpm)
344 int retval;
345 struct armv8_common *armv8 = dpm->arm->arch_info;
347 /* "Prefetch flush" after modifying execution status in CPSR */
348 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dpm->dscr);
349 if (retval == ERROR_OK)
350 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_ISB_SY), &dpm->dscr);
351 return retval;
354 static int dpmv8_instr_read_data_dcc(struct arm_dpm *dpm,
355 uint32_t opcode, uint32_t *data)
357 struct armv8_common *armv8 = dpm->arm->arch_info;
358 int retval;
360 /* the opcode, writing data to DCC */
361 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
362 if (retval != ERROR_OK)
363 return retval;
365 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
368 static int dpmv8_instr_read_data_dcc_64(struct arm_dpm *dpm,
369 uint32_t opcode, uint64_t *data)
371 struct armv8_common *armv8 = dpm->arm->arch_info;
372 int retval;
374 /* the opcode, writing data to DCC */
375 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
376 if (retval != ERROR_OK)
377 return retval;
379 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
382 static int dpmv8_instr_read_data_r0(struct arm_dpm *dpm,
383 uint32_t opcode, uint32_t *data)
385 struct armv8_common *armv8 = dpm->arm->arch_info;
386 int retval;
388 /* the opcode, writing data to R0 */
389 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
390 if (retval != ERROR_OK)
391 return retval;
393 /* write R0 to DCC */
394 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, WRITE_REG_DTRTX), &dpm->dscr);
395 if (retval != ERROR_OK)
396 return retval;
398 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
401 static int dpmv8_instr_read_data_r0_64(struct arm_dpm *dpm,
402 uint32_t opcode, uint64_t *data)
404 struct armv8_common *armv8 = dpm->arm->arch_info;
405 int retval;
407 if (dpm->arm->core_state != ARM_STATE_AARCH64) {
408 uint32_t tmp;
409 retval = dpmv8_instr_read_data_r0(dpm, opcode, &tmp);
410 if (retval == ERROR_OK)
411 *data = tmp;
412 return retval;
415 /* the opcode, writing data to R0 */
416 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
417 if (retval != ERROR_OK)
418 return retval;
420 /* write R0 to DCC */
421 retval = dpmv8_exec_opcode(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
422 if (retval != ERROR_OK)
423 return retval;
425 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
428 #if 0
429 static int dpmv8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
430 target_addr_t addr, uint32_t control)
432 struct armv8_common *armv8 = dpm->arm->arch_info;
433 uint32_t vr = armv8->debug_base;
434 uint32_t cr = armv8->debug_base;
435 int retval;
437 switch (index_t) {
438 case 0 ... 15: /* breakpoints */
439 vr += CPUV8_DBG_BVR_BASE;
440 cr += CPUV8_DBG_BCR_BASE;
441 break;
442 case 16 ... 31: /* watchpoints */
443 vr += CPUV8_DBG_WVR_BASE;
444 cr += CPUV8_DBG_WCR_BASE;
445 index_t -= 16;
446 break;
447 default:
448 return ERROR_FAIL;
450 vr += 16 * index_t;
451 cr += 16 * index_t;
453 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
454 (unsigned) vr, (unsigned) cr);
456 retval = mem_ap_write_atomic_u32(armv8->debug_ap, vr, addr);
457 if (retval != ERROR_OK)
458 return retval;
459 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, control);
461 #endif
463 static int dpmv8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
465 struct armv8_common *armv8 = dpm->arm->arch_info;
466 uint32_t cr;
468 switch (index_t) {
469 case 0 ... 15:
470 cr = armv8->debug_base + CPUV8_DBG_BCR_BASE;
471 break;
472 case 16 ... 31:
473 cr = armv8->debug_base + CPUV8_DBG_WCR_BASE;
474 index_t -= 16;
475 break;
476 default:
477 return ERROR_FAIL;
479 cr += 16 * index_t;
481 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
483 /* clear control register */
484 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, 0);
488 * Coprocessor support
491 /* Read coprocessor */
492 static int dpmv8_mrc(struct target *target, int cpnum,
493 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
494 uint32_t *value)
496 struct arm *arm = target_to_arm(target);
497 struct arm_dpm *dpm = arm->dpm;
498 int retval;
500 retval = dpm->prepare(dpm);
501 if (retval != ERROR_OK)
502 return retval;
504 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
505 (int) op1, (int) CRn,
506 (int) CRm, (int) op2);
508 /* read coprocessor register into R0; return via DCC */
509 retval = dpm->instr_read_data_r0(dpm,
510 ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
511 value);
513 /* (void) */ dpm->finish(dpm);
514 return retval;
517 static int dpmv8_mcr(struct target *target, int cpnum,
518 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
519 uint32_t value)
521 struct arm *arm = target_to_arm(target);
522 struct arm_dpm *dpm = arm->dpm;
523 int retval;
525 retval = dpm->prepare(dpm);
526 if (retval != ERROR_OK)
527 return retval;
529 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
530 (int) op1, (int) CRn,
531 (int) CRm, (int) op2);
533 /* read DCC into r0; then write coprocessor register from R0 */
534 retval = dpm->instr_write_data_r0(dpm,
535 ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
536 value);
538 /* (void) */ dpm->finish(dpm);
539 return retval;
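#if 0
/* Illustrative usage sketch, not part of the original file: reading the
 * AArch32 MIDR through the dpmv8_mrc() hook that armv8_dpm_setup() installs
 * as arm->mrc below.  The wrapper name is hypothetical; the encoding is the
 * architectural "MRC p15, 0, <Rt>, c0, c0, 0".
 */
static int example_read_midr(struct target *target, uint32_t *midr)
{
	struct arm *arm = target_to_arm(target);

	/* cpnum = 15, op1 = 0, op2 = 0, CRn = 0, CRm = 0 */
	return arm->mrc(target, 15, 0, 0, 0, 0, midr);
}
#endif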
542 /*----------------------------------------------------------------------*/
545 * Register access utilities
548 int armv8_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
550 struct armv8_common *armv8 = (struct armv8_common *)dpm->arm->arch_info;
551 int retval = ERROR_OK;
552 unsigned int target_el;
553 enum arm_state core_state;
554 uint32_t cpsr;
556 /* restore previous mode */
557 if (mode == ARM_MODE_ANY) {
558 cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);
560 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32, cpsr);
562 } else {
563 LOG_DEBUG("setting mode 0x%"PRIx32, mode);
564 cpsr = mode;
567 switch (cpsr & 0x1f) {
568 /* aarch32 modes */
569 case ARM_MODE_USR:
570 target_el = 0;
571 break;
572 case ARM_MODE_SVC:
573 case ARM_MODE_ABT:
574 case ARM_MODE_IRQ:
575 case ARM_MODE_FIQ:
576 target_el = 1;
577 break;
579 * TODO: handle ARM_MODE_HYP
580 * case ARM_MODE_HYP:
581 * target_el = 2;
582 * break;
584 case ARM_MODE_MON:
585 target_el = 3;
586 break;
587 /* aarch64 modes */
588 default:
589 target_el = (cpsr >> 2) & 3;
592 if (target_el > SYSTEM_CUREL_EL3) {
593 LOG_ERROR("%s: Invalid target exception level %i", __func__, target_el);
594 return ERROR_FAIL;
597 LOG_DEBUG("target_el = %i, last_el = %i", target_el, dpm->last_el);
598 if (target_el > dpm->last_el) {
599 retval = dpm->instr_execute(dpm,
600 armv8_opcode(armv8, ARMV8_OPC_DCPS) | target_el);
602 /* DCPS clobbers registers just like an exception taken */
603 armv8_dpm_handle_exception(dpm);
604 } else {
605 core_state = armv8_dpm_get_core_state(dpm);
606 if (core_state != ARM_STATE_AARCH64) {
607 /* cannot do DRPS/ERET when already in EL0 */
608 if (dpm->last_el != 0) {
609 /* load SPSR with the desired mode and execute DRPS */
610 LOG_DEBUG("SPSR = 0x%08"PRIx32, cpsr);
611 retval = dpm->instr_write_data_r0(dpm,
612 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr);
613 if (retval == ERROR_OK)
614 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
616 } else {
618 * need to execute multiple DRPS instructions until target_el
619 * is reached
621 while (retval == ERROR_OK && dpm->last_el != target_el) {
622 unsigned int cur_el = dpm->last_el;
623 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
624 if (cur_el == dpm->last_el) {
625 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el);
626 break;
631 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
632 dpm->arm->cpsr->dirty = true;
633 dpm->arm->pc->dirty = true;
636 * re-evaluate the core state, we might be in AArch32 state now
637 * we rely on dpm->dscr being up-to-date
639 core_state = armv8_dpm_get_core_state(dpm);
640 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
641 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
644 return retval;
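/*
 * Worked examples for the mode-to-EL mapping above (illustrative only,
 * assuming the architectural CPSR/PSTATE mode encodings): an AArch32 CPSR
 * with M = 0x13 (SVC) selects target_el = 1; an AArch64 PSTATE value with
 * M[4:0] = 0b00101 (EL1h) hits the default case and also gives
 * (0x5 >> 2) & 3 = 1, while 0b01001 (EL2h) gives target_el = 2.
 */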
648 * Common register read, relies on armv8_select_reg_access() having been called.
650 static int dpmv8_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
652 struct armv8_common *armv8 = dpm->arm->arch_info;
653 uint64_t value_64;
654 int retval;
656 retval = armv8->read_reg_u64(armv8, regnum, &value_64);
658 if (retval == ERROR_OK) {
659 r->valid = true;
660 r->dirty = false;
661 buf_set_u64(r->value, 0, r->size, value_64);
662 if (r->size == 64)
663 LOG_DEBUG("READ: %s, %16.8llx", r->name, (unsigned long long) value_64);
664 else
665 LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned int) value_64);
667 return ERROR_OK;
671 * Common register write, relies on armv8_select_reg_access() having been called.
673 static int dpmv8_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
675 struct armv8_common *armv8 = dpm->arm->arch_info;
676 int retval = ERROR_FAIL;
677 uint64_t value_64;
679 value_64 = buf_get_u64(r->value, 0, r->size);
681 retval = armv8->write_reg_u64(armv8, regnum, value_64);
682 if (retval == ERROR_OK) {
683 r->dirty = false;
684 if (r->size == 64)
685 LOG_DEBUG("WRITE: %s, %16.8llx", r->name, (unsigned long long)value_64);
686 else
687 LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned int)value_64);
690 return ERROR_OK;
694 * Read basic registers of the current context: R0 to R15, and CPSR;
695 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
696 * In normal operation this is called on entry to halting debug state,
697 * possibly after some other operations supporting restore of debug state
698 * or making sure the CPU is fully idle (drain write buffer, etc).
700 int armv8_dpm_read_current_registers(struct arm_dpm *dpm)
702 struct arm *arm = dpm->arm;
703 struct armv8_common *armv8 = (struct armv8_common *)arm->arch_info;
704 struct reg_cache *cache;
705 struct reg *r;
706 uint32_t cpsr;
707 int retval;
709 retval = dpm->prepare(dpm);
710 if (retval != ERROR_OK)
711 return retval;
713 cache = arm->core_cache;
715 /* read R0 first (it's used for scratch), then CPSR */
716 r = cache->reg_list + ARMV8_R0;
717 if (!r->valid) {
718 retval = dpmv8_read_reg(dpm, r, ARMV8_R0);
719 if (retval != ERROR_OK)
720 goto fail;
722 r->dirty = true;
724 /* read R1, too, it will be clobbered during memory access */
725 r = cache->reg_list + ARMV8_R1;
726 if (!r->valid) {
727 retval = dpmv8_read_reg(dpm, r, ARMV8_R1);
728 if (retval != ERROR_OK)
729 goto fail;
732 /* read cpsr to r0 and get it back */
733 retval = dpm->instr_read_data_r0(dpm,
734 armv8_opcode(armv8, READ_REG_DSPSR), &cpsr);
735 if (retval != ERROR_OK)
736 goto fail;
738 /* update core mode and state */
739 armv8_set_cpsr(arm, cpsr);
741 for (unsigned int i = ARMV8_PC; i < cache->num_regs ; i++) {
742 struct arm_reg *arm_reg;
744 r = armv8_reg_current(arm, i);
745 if (r->valid)
746 continue;
749 * Only read registers that are available from the
750 * current EL (or core mode).
752 arm_reg = r->arch_info;
753 if (arm_reg->mode != ARM_MODE_ANY &&
754 dpm->last_el != armv8_curel_from_core_mode(arm_reg->mode))
755 continue;
757 retval = dpmv8_read_reg(dpm, r, i);
758 if (retval != ERROR_OK)
759 goto fail;
763 fail:
764 dpm->finish(dpm);
765 return retval;
768 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
769 * unless they're removed, or need updating because of single-stepping
770 * or running debugger code.
772 static int dpmv8_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
773 struct dpm_bpwp *xp, int *set_p)
775 int retval = ERROR_OK;
776 bool disable;
778 if (!set_p) {
779 if (!xp->dirty)
780 goto done;
781 xp->dirty = false;
782 /* removed or startup; we must disable it */
783 disable = true;
784 } else if (bpwp) {
785 if (!xp->dirty)
786 goto done;
787 /* disabled, but we must set it */
788 xp->dirty = disable = false;
789 *set_p = true;
790 } else {
791 if (!*set_p)
792 goto done;
793 /* set, but we must temporarily disable it */
794 xp->dirty = disable = true;
795 *set_p = false;
798 if (disable)
799 retval = dpm->bpwp_disable(dpm, xp->number);
800 else
801 retval = dpm->bpwp_enable(dpm, xp->number,
802 xp->address, xp->control);
804 if (retval != ERROR_OK)
805 LOG_ERROR("%s: can't %s HW %spoint %d",
806 target_name(dpm->arm->target),
807 disable ? "disable" : "enable",
808 (xp->number < 16) ? "break" : "watch",
809 xp->number & 0xf);
810 done:
811 return retval;
814 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp);
817 * Writes all modified core registers for all processor modes. In normal
818 * operation this is called on exit from halting debug state.
820 * @param dpm: represents the processor
821 * @param bpwp: true ensures breakpoints and watchpoints are set,
822 * false ensures they are cleared
824 int armv8_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
826 struct arm *arm = dpm->arm;
827 struct reg_cache *cache = arm->core_cache;
828 int retval;
830 retval = dpm->prepare(dpm);
831 if (retval != ERROR_OK)
832 goto done;
834 /* If we're managing hardware breakpoints for this core, enable
835 * or disable them as requested.
837 * REVISIT We don't yet manage them for ANY cores. Eventually
838 * we should be able to assume we handle them; but until then,
839 * cope with the hand-crafted breakpoint code.
841 if (arm->target->type->add_breakpoint == dpmv8_add_breakpoint) {
842 for (unsigned i = 0; i < dpm->nbp; i++) {
843 struct dpm_bp *dbp = dpm->dbp + i;
844 struct breakpoint *bp = dbp->bp;
846 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
847 bp ? &bp->set : NULL);
848 if (retval != ERROR_OK)
849 goto done;
853 /* enable/disable watchpoints */
854 for (unsigned i = 0; i < dpm->nwp; i++) {
855 struct dpm_wp *dwp = dpm->dwp + i;
856 struct watchpoint *wp = dwp->wp;
858 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
859 wp ? &wp->set : NULL);
860 if (retval != ERROR_OK)
861 goto done;
864 /* NOTE: writes to breakpoint and watchpoint registers might
865 * be queued, and need (efficient/batched) flushing later.
868 /* Restore original core mode and state */
869 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
870 if (retval != ERROR_OK)
871 goto done;
873 /* check everything except our scratch register R0 */
874 for (unsigned i = 1; i < cache->num_regs; i++) {
875 struct arm_reg *r;
877 /* skip PC and CPSR */
878 if (i == ARMV8_PC || i == ARMV8_xPSR)
879 continue;
880 /* skip invalid */
881 if (!cache->reg_list[i].valid)
882 continue;
883 /* skip non-dirty */
884 if (!cache->reg_list[i].dirty)
885 continue;
887 /* skip all registers not on the current EL */
888 r = cache->reg_list[i].arch_info;
889 if (r->mode != ARM_MODE_ANY &&
890 dpm->last_el != armv8_curel_from_core_mode(r->mode))
891 continue;
893 retval = dpmv8_write_reg(dpm, &cache->reg_list[i], i);
894 if (retval != ERROR_OK)
895 break;
898 /* flush CPSR and PC */
899 if (retval == ERROR_OK)
900 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_xPSR], ARMV8_xPSR);
901 if (retval == ERROR_OK)
902 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_PC], ARMV8_PC);
903 /* flush R0 -- it's *very* dirty by now */
904 if (retval == ERROR_OK)
905 retval = dpmv8_write_reg(dpm, &cache->reg_list[0], 0);
906 if (retval == ERROR_OK)
907 dpm->instr_cpsr_sync(dpm);
908 done:
909 dpm->finish(dpm);
910 return retval;
914 * Standard ARM register accessors ... there are three methods
915 * in "struct arm", to support individual read/write and bulk read
916 * of registers.
919 static int armv8_dpm_read_core_reg(struct target *target, struct reg *r,
920 int regnum, enum arm_mode mode)
922 struct arm *arm = target_to_arm(target);
923 struct arm_dpm *dpm = target_to_arm(target)->dpm;
924 int retval;
925 int max = arm->core_cache->num_regs;
927 if (regnum < 0 || regnum >= max)
928 return ERROR_COMMAND_SYNTAX_ERROR;
931 * REVISIT what happens if we try to read SPSR in a core mode
932 * which has no such register?
934 retval = dpm->prepare(dpm);
935 if (retval != ERROR_OK)
936 return retval;
938 retval = dpmv8_read_reg(dpm, r, regnum);
939 if (retval != ERROR_OK)
940 goto fail;
942 fail:
943 /* (void) */ dpm->finish(dpm);
944 return retval;
947 static int armv8_dpm_write_core_reg(struct target *target, struct reg *r,
948 int regnum, enum arm_mode mode, uint8_t *value)
950 struct arm *arm = target_to_arm(target);
951 struct arm_dpm *dpm = target_to_arm(target)->dpm;
952 int retval;
953 int max = arm->core_cache->num_regs;
955 if (regnum < 0 || regnum >= max)
956 return ERROR_COMMAND_SYNTAX_ERROR;
958 /* REVISIT what happens if we try to write SPSR in a core mode
959 * which has no such register?
962 retval = dpm->prepare(dpm);
963 if (retval != ERROR_OK)
964 return retval;
966 retval = dpmv8_write_reg(dpm, r, regnum);
968 /* always clean up, regardless of error */
969 dpm->finish(dpm);
971 return retval;
974 static int armv8_dpm_full_context(struct target *target)
976 struct arm *arm = target_to_arm(target);
977 struct arm_dpm *dpm = arm->dpm;
978 struct reg_cache *cache = arm->core_cache;
979 int retval;
980 bool did_read;
982 retval = dpm->prepare(dpm);
983 if (retval != ERROR_OK)
984 goto done;
986 do {
987 enum arm_mode mode = ARM_MODE_ANY;
989 did_read = false;
991 /* We "know" arm_dpm_read_current_registers() was called so
992 * the unmapped registers (R0..R7, PC, AND CPSR) and some
993 * view of R8..R14 are current. We also "know" oddities of
994 * register mapping: special cases for R8..R12 and SPSR.
996 * Pick some mode with unread registers and read them all.
997 * Repeat until done.
999 for (unsigned i = 0; i < cache->num_regs; i++) {
1000 struct arm_reg *r;
1002 if (cache->reg_list[i].valid)
1003 continue;
1004 r = cache->reg_list[i].arch_info;
1006 /* may need to pick a mode and set CPSR */
1007 if (!did_read) {
1008 did_read = true;
1009 mode = r->mode;
1011 /* For regular (ARM_MODE_ANY) R8..R12
1012 * in case we've entered debug state
1013 * in FIQ mode we need to patch mode.
1015 if (mode != ARM_MODE_ANY)
1016 retval = armv8_dpm_modeswitch(dpm, mode);
1017 else
1018 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_USR);
1020 if (retval != ERROR_OK)
1021 goto done;
1023 if (r->mode != mode)
1024 continue;
1026 /* CPSR was read, so "R16" must mean SPSR */
1027 retval = dpmv8_read_reg(dpm,
1028 &cache->reg_list[i],
1029 (r->num == 16) ? 17 : r->num);
1030 if (retval != ERROR_OK)
1031 goto done;
1034 } while (did_read);
1036 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
1037 /* (void) */ dpm->finish(dpm);
1038 done:
1039 return retval;
1043 /*----------------------------------------------------------------------*/
1046 * Breakpoint and Watchpoint support.
1048 * Hardware {break,watch}points are usually left active, to minimize
1049 * debug entry/exit costs. When they are set or cleared, it's done in
1050 * batches. Also, DPM-conformant hardware can update debug registers
1051 * regardless of whether the CPU is running or halted ... though that
1052 * fact isn't currently leveraged.
1055 static int dpmv8_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
1056 uint32_t addr, uint32_t length)
1058 uint32_t control;
1060 control = (1 << 0) /* enable */
1061 | (3 << 1); /* both user and privileged access */
1063 /* Match 1, 2, or all 4 byte addresses in this word.
1065 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
1066 * Support larger length, when addr is suitably aligned. In
1067 * particular, allow watchpoints on 8 byte "double" values.
1069 * REVISIT allow watchpoints on unaligned 2-bit values; and on
1070 * v7 hardware, unaligned 4-byte ones too.
1072 switch (length) {
1073 case 1:
1074 control |= (1 << (addr & 3)) << 5;
1075 break;
1076 case 2:
1077 /* require 2-byte alignment */
1078 if (!(addr & 1)) {
1079 control |= (3 << (addr & 2)) << 5;
1080 break;
1082 /* FALL THROUGH */
1083 case 4:
1084 /* require 4-byte alignment */
1085 if (!(addr & 3)) {
1086 control |= 0xf << 5;
1087 break;
1089 /* FALL THROUGH */
1090 default:
1091 LOG_ERROR("unsupported {break,watch}point length/alignment");
1092 return ERROR_COMMAND_SYNTAX_ERROR;
1095 /* other shared control bits:
1096 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
1097 * bit 20 == 0 ... not linked to a context ID
1098 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
1101 xp->address = addr & ~3;
1102 xp->control = control;
1103 xp->dirty = true;
1105 LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
1106 xp->address, control, xp->number);
1108 /* hardware is updated in write_dirty_registers() */
1109 return ERROR_OK;
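/*
 * Worked example (illustrative only): a 2-byte watchpoint on an address with
 * low bits 0b10 takes the "case 2" path above; (3 << 2) << 5 sets BAS bits
 * [8:7], selecting byte lanes 2 and 3 of the word.  Combined with the enable
 * and privilege bits this yields control = 0x187, and xp->address is rounded
 * down to the containing word by "addr & ~3".
 */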
1112 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp)
1114 struct arm *arm = target_to_arm(target);
1115 struct arm_dpm *dpm = arm->dpm;
1116 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1118 if (bp->length < 2)
1119 return ERROR_COMMAND_SYNTAX_ERROR;
1120 if (!dpm->bpwp_enable)
1121 return retval;
1123 /* FIXME we need a generic solution for software breakpoints. */
1124 if (bp->type == BKPT_SOFT)
1125 LOG_DEBUG("using HW bkpt, not SW...");
1127 for (unsigned i = 0; i < dpm->nbp; i++) {
1128 if (!dpm->dbp[i].bp) {
1129 retval = dpmv8_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
1130 bp->address, bp->length);
1131 if (retval == ERROR_OK)
1132 dpm->dbp[i].bp = bp;
1133 break;
1137 return retval;
1140 static int dpmv8_remove_breakpoint(struct target *target, struct breakpoint *bp)
1142 struct arm *arm = target_to_arm(target);
1143 struct arm_dpm *dpm = arm->dpm;
1144 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1146 for (unsigned i = 0; i < dpm->nbp; i++) {
1147 if (dpm->dbp[i].bp == bp) {
1148 dpm->dbp[i].bp = NULL;
1149 dpm->dbp[i].bpwp.dirty = true;
1151 /* hardware is updated in write_dirty_registers() */
1152 retval = ERROR_OK;
1153 break;
1157 return retval;
1160 static int dpmv8_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
1161 struct watchpoint *wp)
1163 int retval;
1164 struct dpm_wp *dwp = dpm->dwp + index_t;
1165 uint32_t control;
1167 /* this hardware doesn't support data value matching or masking */
1168 if (wp->value || wp->mask != ~(uint32_t)0) {
1169 LOG_DEBUG("watchpoint values and masking not supported");
1170 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1173 retval = dpmv8_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
1174 if (retval != ERROR_OK)
1175 return retval;
1177 control = dwp->bpwp.control;
1178 switch (wp->rw) {
1179 case WPT_READ:
1180 control |= 1 << 3;
1181 break;
1182 case WPT_WRITE:
1183 control |= 2 << 3;
1184 break;
1185 case WPT_ACCESS:
1186 control |= 3 << 3;
1187 break;
1189 dwp->bpwp.control = control;
1191 dpm->dwp[index_t].wp = wp;
1193 return retval;
1196 static int dpmv8_add_watchpoint(struct target *target, struct watchpoint *wp)
1198 struct arm *arm = target_to_arm(target);
1199 struct arm_dpm *dpm = arm->dpm;
1200 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1202 if (dpm->bpwp_enable) {
1203 for (unsigned i = 0; i < dpm->nwp; i++) {
1204 if (!dpm->dwp[i].wp) {
1205 retval = dpmv8_watchpoint_setup(dpm, i, wp);
1206 break;
1211 return retval;
1214 static int dpmv8_remove_watchpoint(struct target *target, struct watchpoint *wp)
1216 struct arm *arm = target_to_arm(target);
1217 struct arm_dpm *dpm = arm->dpm;
1218 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1220 for (unsigned i = 0; i < dpm->nwp; i++) {
1221 if (dpm->dwp[i].wp == wp) {
1222 dpm->dwp[i].wp = NULL;
1223 dpm->dwp[i].bpwp.dirty = true;
1225 /* hardware is updated in write_dirty_registers() */
1226 retval = ERROR_OK;
1227 break;
1231 return retval;
1234 void armv8_dpm_report_wfar(struct arm_dpm *dpm, uint64_t addr)
1236 switch (dpm->arm->core_state) {
1237 case ARM_STATE_ARM:
1238 case ARM_STATE_AARCH64:
1239 addr -= 8;
1240 break;
1241 case ARM_STATE_THUMB:
1242 case ARM_STATE_THUMB_EE:
1243 addr -= 4;
1244 break;
1245 case ARM_STATE_JAZELLE:
1246 /* ?? */
1247 break;
1248 default:
1249 LOG_DEBUG("Unknown core_state");
1250 break;
1252 dpm->wp_pc = addr;
1256 * Handle exceptions taken in debug state. This happens mostly for memory
1257 * accesses that violated a MMU policy. Taking an exception while in debug
1258 * state clobbers certain state registers on the target exception level.
1259 * Just mark those registers dirty so that they get restored on resume.
1260 * This works both for AArch32 and AArch64 states.
1262 * This function must not perform any actions that trigger another exception
1263 * or a recursion will happen.
1265 void armv8_dpm_handle_exception(struct arm_dpm *dpm)
1267 struct armv8_common *armv8 = dpm->arm->arch_info;
1268 struct reg_cache *cache = dpm->arm->core_cache;
1269 enum arm_state core_state;
1270 uint64_t dlr;
1271 uint32_t dspsr;
1272 unsigned int el;
1274 static const int clobbered_regs_by_el[3][5] = {
1275 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL1, ARMV8_ESR_EL1, ARMV8_SPSR_EL1 },
1276 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL2, ARMV8_ESR_EL2, ARMV8_SPSR_EL2 },
1277 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL3, ARMV8_ESR_EL3, ARMV8_SPSR_EL3 },
1280 el = (dpm->dscr >> 8) & 3;
1282 /* safety check, must not happen since EL0 cannot be a target for an exception */
1283 if (el < SYSTEM_CUREL_EL1 || el > SYSTEM_CUREL_EL3) {
1284 LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__, el);
1285 return;
1288 /* Clear sticky error */
1289 mem_ap_write_u32(armv8->debug_ap,
1290 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1292 armv8->read_reg_u64(armv8, ARMV8_xPSR, &dlr);
1293 dspsr = dlr;
1294 armv8->read_reg_u64(armv8, ARMV8_PC, &dlr);
1296 LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64" DSPSR=0x%08"PRIx32,
1297 el, dlr, dspsr);
1299 /* mark all clobbered registers as dirty */
1300 for (int i = 0; i < 5; i++)
1301 cache->reg_list[clobbered_regs_by_el[el-1][i]].dirty = true;
1304 * re-evaluate the core state, we might be in AArch64 state now
1305 * we rely on dpm->dscr being up-to-date
1307 core_state = armv8_dpm_get_core_state(dpm);
1308 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
1309 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
1311 armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
1314 /*----------------------------------------------------------------------*/
1317 * Other debug and support utilities
1320 void armv8_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
1322 struct target *target = dpm->arm->target;
1324 dpm->dscr = dscr;
1325 dpm->last_el = (dscr >> 8) & 3;
1327 /* Examine debug reason */
1328 switch (DSCR_ENTRY(dscr)) {
1329 /* FALL THROUGH -- assume a v6 core in abort mode */
1330 case DSCRV8_ENTRY_EXT_DEBUG: /* EDBGRQ */
1331 target->debug_reason = DBG_REASON_DBGRQ;
1332 break;
1333 case DSCRV8_ENTRY_HALT_STEP_EXECLU: /* HALT step */
1334 case DSCRV8_ENTRY_HALT_STEP_NORMAL: /* Halt step*/
1335 case DSCRV8_ENTRY_HALT_STEP:
1336 target->debug_reason = DBG_REASON_SINGLESTEP;
1337 break;
1338 case DSCRV8_ENTRY_HLT: /* HLT instruction (software breakpoint) */
1339 case DSCRV8_ENTRY_BKPT: /* SW BKPT (?) */
1340 case DSCRV8_ENTRY_RESET_CATCH: /* Reset catch */
1341 case DSCRV8_ENTRY_OS_UNLOCK: /*OS unlock catch*/
1342 case DSCRV8_ENTRY_EXCEPTION_CATCH: /*exception catch*/
1343 case DSCRV8_ENTRY_SW_ACCESS_DBG: /*SW access dbg register*/
1344 target->debug_reason = DBG_REASON_BREAKPOINT;
1345 break;
1346 case DSCRV8_ENTRY_WATCHPOINT: /* asynch watchpoint */
1347 target->debug_reason = DBG_REASON_WATCHPOINT;
1348 break;
1349 default:
1350 target->debug_reason = DBG_REASON_UNDEFINED;
1351 break;
1356 /*----------------------------------------------------------------------*/
1359 * Setup and management support.
1363 * Hooks up this DPM to its associated target; call only once.
1364 * Initially this only covers the register cache.
1366 * Oh, and watchpoints. Yeah.
1368 int armv8_dpm_setup(struct arm_dpm *dpm)
1370 struct arm *arm = dpm->arm;
1371 struct target *target = arm->target;
1372 struct reg_cache *cache;
1373 arm->dpm = dpm;
1375 /* register access setup */
1376 arm->full_context = armv8_dpm_full_context;
1377 arm->read_core_reg = armv8_dpm_read_core_reg;
1378 arm->write_core_reg = armv8_dpm_write_core_reg;
1380 if (arm->core_cache == NULL) {
1381 cache = armv8_build_reg_cache(target);
1382 if (!cache)
1383 return ERROR_FAIL;
1386 /* coprocessor access setup */
1387 arm->mrc = dpmv8_mrc;
1388 arm->mcr = dpmv8_mcr;
1390 dpm->prepare = dpmv8_dpm_prepare;
1391 dpm->finish = dpmv8_dpm_finish;
1393 dpm->instr_execute = dpmv8_instr_execute;
1394 dpm->instr_write_data_dcc = dpmv8_instr_write_data_dcc;
1395 dpm->instr_write_data_dcc_64 = dpmv8_instr_write_data_dcc_64;
1396 dpm->instr_write_data_r0 = dpmv8_instr_write_data_r0;
1397 dpm->instr_write_data_r0_64 = dpmv8_instr_write_data_r0_64;
1398 dpm->instr_cpsr_sync = dpmv8_instr_cpsr_sync;
1400 dpm->instr_read_data_dcc = dpmv8_instr_read_data_dcc;
1401 dpm->instr_read_data_dcc_64 = dpmv8_instr_read_data_dcc_64;
1402 dpm->instr_read_data_r0 = dpmv8_instr_read_data_r0;
1403 dpm->instr_read_data_r0_64 = dpmv8_instr_read_data_r0_64;
1405 dpm->arm_reg_current = armv8_reg_current;
1407 /* dpm->bpwp_enable = dpmv8_bpwp_enable; */
1408 dpm->bpwp_disable = dpmv8_bpwp_disable;
1410 /* breakpoint setup -- optional until it works everywhere */
1411 if (!target->type->add_breakpoint) {
1412 target->type->add_breakpoint = dpmv8_add_breakpoint;
1413 target->type->remove_breakpoint = dpmv8_remove_breakpoint;
1416 /* watchpoint setup */
1417 target->type->add_watchpoint = dpmv8_add_watchpoint;
1418 target->type->remove_watchpoint = dpmv8_remove_watchpoint;
1420 /* FIXME add vector catch support */
1422 dpm->nbp = 1 + ((dpm->didr >> 12) & 0xf);
1423 dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
1425 dpm->nwp = 1 + ((dpm->didr >> 20) & 0xf);
1426 dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
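	/* For example (illustrative only): a didr value with bits [15:12] = 0x5
	 * and bits [23:20] = 0x3 describes 6 hardware breakpoints and
	 * 4 watchpoints.
	 */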
1428 if (!dpm->dbp || !dpm->dwp) {
1429 free(dpm->dbp);
1430 free(dpm->dwp);
1431 return ERROR_FAIL;
1434 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1435 target_name(target), dpm->nbp, dpm->nwp);
1437 /* REVISIT ... and some of those breakpoints could match
1438 * execution context IDs...
1441 return ERROR_OK;
1445 * Reinitializes DPM state at the beginning of a new debug session
1446 * or after a reset which may have affected the debug module.
1448 int armv8_dpm_initialize(struct arm_dpm *dpm)
1450 /* Disable all breakpoints and watchpoints at startup. */
1451 if (dpm->bpwp_disable) {
1452 unsigned i;
1454 for (i = 0; i < dpm->nbp; i++) {
1455 dpm->dbp[i].bpwp.number = i;
1456 (void) dpm->bpwp_disable(dpm, i);
1458 for (i = 0; i < dpm->nwp; i++) {
1459 dpm->dwp[i].bpwp.number = 16 + i;
1460 (void) dpm->bpwp_disable(dpm, 16 + i);
1462 } else
1463 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1464 target_name(dpm->arm->target));
1466 return ERROR_OK;
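#if 0
/* Illustrative usage sketch, not part of the original file: the order in
 * which a core driver is expected to call into this file.  The wrapper name
 * and the field setup are assumptions modelled on the aarch64 target code,
 * not a drop-in implementation.
 */
static int example_dpm_bringup(struct armv8_common *armv8, uint32_t didr)
{
	struct arm_dpm *dpm = &armv8->dpm;
	int retval;

	dpm->arm = &armv8->arm;
	dpm->didr = didr;

	/* one-time hookup: register cache, bp/wp tables, target hooks */
	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		/* per-session: park all hardware break/watchpoints disabled */
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
#endif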