1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
37 * *
38 * Cortex-A8(tm) TRM, ARM DDI 0344H *
39 * Cortex-A9(tm) TRM, ARM DDI 0407F *
40 * Cortex-R4(tm) TRM, ARM DDI 0363E *
41 * *
42 ***************************************************************************/
44 #ifdef HAVE_CONFIG_H
45 #include "config.h"
46 #endif
48 #include "breakpoints.h"
49 #include "cortex_a.h"
50 #include "register.h"
51 #include "target_request.h"
52 #include "target_type.h"
53 #include "arm_opcodes.h"
54 #include <helper/time_support.h>
56 static int cortex_a8_poll(struct target *target);
57 static int cortex_a8_debug_entry(struct target *target);
58 static int cortex_a8_restore_context(struct target *target, bool bpwp);
59 static int cortex_a8_set_breakpoint(struct target *target,
60 struct breakpoint *breakpoint, uint8_t matchmode);
61 static int cortex_a8_set_context_breakpoint(struct target *target,
62 struct breakpoint *breakpoint, uint8_t matchmode);
63 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
64 struct breakpoint *breakpoint);
65 static int cortex_a8_unset_breakpoint(struct target *target,
66 struct breakpoint *breakpoint);
67 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
68 uint32_t *value, int regnum);
69 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
70 uint32_t value, int regnum);
71 static int cortex_a8_mmu(struct target *target, int *enabled);
72 static int cortex_a8_virt2phys(struct target *target,
73 uint32_t virt, uint32_t *phys);
74 static int cortex_a8_read_apb_ab_memory(struct target *target,
75 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
78 /* restore cp15_control_reg at resume */
79 static int cortex_a8_restore_cp15_control_reg(struct target *target)
81 int retval = ERROR_OK;
82 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
83 struct armv7a_common *armv7a = target_to_armv7a(target);
85 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
86 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
87 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
88 retval = armv7a->arm.mcr(target, 15,
89 0, 0, /* op1, op2 */
90 1, 0, /* CRn, CRm */
91 cortex_a8->cp15_control_reg);
93 return retval;
96 /* check the address before a cortex_a8 APB read/write access with the MMU on,
97 * to avoid a predictable APB data abort */
98 static int cortex_a8_check_address(struct target *target, uint32_t address)
100 struct armv7a_common *armv7a = target_to_armv7a(target);
101 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
102 uint32_t os_border = armv7a->armv7a_mmu.os_border;
103 if ((address < os_border) &&
104 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
105 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
106 return ERROR_FAIL;
108 if ((address >= os_border) &&
109 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
110 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
111 cortex_a8->curr_mode = ARM_MODE_SVC;
112 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
113 address);
114 return ERROR_OK;
116 if ((address < os_border) &&
117 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
118 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
119 cortex_a8->curr_mode = ARM_MODE_ANY;
121 return ERROR_OK;
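/* Illustrative example (values assumed, not taken from this file): with a
 * classic 3G/1G Linux split, armv7a_mmu.os_border would be 0xc0000000, so an
 * access to 0x00010000 while the core stopped in SVC mode is rejected above,
 * while an access to 0xc0008000 first switches the debug view into SVC mode.
 */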
123 /* modify cp15_control_reg to enable or disable the MMU for:
124 * - virt2phys address conversion
125 * - reading or writing memory at physical or virtual addresses */
126 static int cortex_a8_mmu_modify(struct target *target, int enable)
128 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
129 struct armv7a_common *armv7a = target_to_armv7a(target);
130 int retval = ERROR_OK;
131 if (enable) {
132 /* the MMU can only be enabled here if it was enabled when the target stopped */
133 if (!(cortex_a8->cp15_control_reg & 0x1U)) {
134 LOG_ERROR("trying to enable the MMU on a target that stopped with the MMU disabled");
135 return ERROR_FAIL;
137 if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
138 cortex_a8->cp15_control_reg_curr |= 0x1U;
139 retval = armv7a->arm.mcr(target, 15,
140 0, 0, /* op1, op2 */
141 1, 0, /* CRn, CRm */
142 cortex_a8->cp15_control_reg_curr);
144 } else {
145 if (cortex_a8->cp15_control_reg_curr & 0x4U) {
146 /* data cache is active */
147 cortex_a8->cp15_control_reg_curr &= ~0x4U;
148 /* flush data cache armv7 function to be called */
149 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
150 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
152 if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
153 cortex_a8->cp15_control_reg_curr &= ~0x1U;
154 retval = armv7a->arm.mcr(target, 15,
155 0, 0, /* op1, op2 */
156 1, 0, /* CRn, CRm */
157 cortex_a8->cp15_control_reg_curr);
160 return retval;
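/* SCTLR (cp15 c1) bits used by the MMU/cache handling above and by
 * cortex_a8_post_debug_entry() further down (architectural ARMv7-A layout):
 *   bit 0  (M) - MMU enable                 -> mask 0x1
 *   bit 2  (C) - data/unified cache enable  -> mask 0x4
 *   bit 12 (I) - instruction cache enable   -> mask 0x1000
 */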
164 * Cortex-A8 basic debug access; very low level, assumes state is saved
166 static int cortex_a8_init_debug_access(struct target *target)
168 struct armv7a_common *armv7a = target_to_armv7a(target);
169 struct adiv5_dap *swjdp = armv7a->arm.dap;
170 int retval;
171 uint32_t dummy;
173 LOG_DEBUG(" ");
175 /* Unlock the debug registers for modification.
176 * The debug port might be uninitialised, so try twice. */
177 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
178 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
179 if (retval != ERROR_OK) {
180 /* try again */
181 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
182 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
183 if (retval == ERROR_OK)
184 LOG_USER(
185 "Unlocking debug access failed on first, but succeeded on second try.");
187 if (retval != ERROR_OK)
188 return retval;
189 /* Clear Sticky Power Down status Bit in PRSR to enable access to
190 the registers in the Core Power Domain */
191 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
192 armv7a->debug_base + CPUDBG_PRSR, &dummy);
193 if (retval != ERROR_OK)
194 return retval;
196 /* Enabling of instruction execution in debug mode is done in debug_entry code */
198 /* Resync breakpoint registers */
200 /* Since this is likely called from init or reset, update target state information*/
201 return cortex_a8_poll(target);
204 /* To reduce needless round-trips, pass in a pointer to the current
205 * DSCR value. Initialize it to zero if you just need to know the
206 * value on return from this function; or DSCR_INSTR_COMP if you
207 * happen to know that no instruction is pending.
209 static int cortex_a8_exec_opcode(struct target *target,
210 uint32_t opcode, uint32_t *dscr_p)
212 uint32_t dscr;
213 int retval;
214 struct armv7a_common *armv7a = target_to_armv7a(target);
215 struct adiv5_dap *swjdp = armv7a->arm.dap;
217 dscr = dscr_p ? *dscr_p : 0;
219 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
221 /* Wait for InstrCompl bit to be set */
222 long long then = timeval_ms();
223 while ((dscr & DSCR_INSTR_COMP) == 0) {
224 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
225 armv7a->debug_base + CPUDBG_DSCR, &dscr);
226 if (retval != ERROR_OK) {
227 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
228 return retval;
230 if (timeval_ms() > then + 1000) {
231 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
232 return ERROR_FAIL;
236 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
237 armv7a->debug_base + CPUDBG_ITR, opcode);
238 if (retval != ERROR_OK)
239 return retval;
241 then = timeval_ms();
242 do {
243 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
244 armv7a->debug_base + CPUDBG_DSCR, &dscr);
245 if (retval != ERROR_OK) {
246 LOG_ERROR("Could not read DSCR register");
247 return retval;
249 if (timeval_ms() > then + 1000) {
250 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
251 return ERROR_FAIL;
253 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
255 if (dscr_p)
256 *dscr_p = dscr;
258 return retval;
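/* Illustrative caller sketch for cortex_a8_exec_opcode() (assumed usage, not
 * taken from this file): threading the same dscr variable through consecutive
 * calls lets only the first call poll DSCR for InstrCompl.
 *
 *	uint32_t dscr = 0;          (0 forces an initial DSCR read)
 *	retval = cortex_a8_exec_opcode(target, opcode_1, &dscr);
 *	if (retval == ERROR_OK)
 *		retval = cortex_a8_exec_opcode(target, opcode_2, &dscr);
 */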
261 /**************************************************************************
262 Read core registers with very few exec_opcode calls; fast, but needs a work area.
263 This can cause problems with the MMU active.
264 **************************************************************************/
265 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
266 uint32_t *regfile)
268 int retval = ERROR_OK;
269 struct armv7a_common *armv7a = target_to_armv7a(target);
270 struct adiv5_dap *swjdp = armv7a->arm.dap;
272 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
273 if (retval != ERROR_OK)
274 return retval;
275 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
276 if (retval != ERROR_OK)
277 return retval;
278 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
279 if (retval != ERROR_OK)
280 return retval;
282 retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
283 (uint8_t *)(&regfile[1]), 4*15, address);
285 return retval;
288 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
289 uint32_t *value, int regnum)
291 int retval = ERROR_OK;
292 uint8_t reg = regnum&0xFF;
293 uint32_t dscr = 0;
294 struct armv7a_common *armv7a = target_to_armv7a(target);
295 struct adiv5_dap *swjdp = armv7a->arm.dap;
297 if (reg > 17)
298 return retval;
300 if (reg < 15) {
301 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
302 retval = cortex_a8_exec_opcode(target,
303 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
304 &dscr);
305 if (retval != ERROR_OK)
306 return retval;
307 } else if (reg == 15) {
308 /* "MOV r0, r15"; then move r0 to DCCTX */
309 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
310 if (retval != ERROR_OK)
311 return retval;
312 retval = cortex_a8_exec_opcode(target,
313 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
314 &dscr);
315 if (retval != ERROR_OK)
316 return retval;
317 } else {
318 /* "MRS r0, CPSR" or "MRS r0, SPSR"
319 * then move r0 to DCCTX
321 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
322 if (retval != ERROR_OK)
323 return retval;
324 retval = cortex_a8_exec_opcode(target,
325 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
326 &dscr);
327 if (retval != ERROR_OK)
328 return retval;
331 /* Wait for DTRTXfull, then read DTRTX */
332 long long then = timeval_ms();
333 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
334 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
335 armv7a->debug_base + CPUDBG_DSCR, &dscr);
336 if (retval != ERROR_OK)
337 return retval;
338 if (timeval_ms() > then + 1000) {
339 LOG_ERROR("Timeout waiting for DTRTXfull in cortex_a8_dap_read_coreregister_u32");
340 return ERROR_FAIL;
344 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
345 armv7a->debug_base + CPUDBG_DTRTX, value);
346 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
348 return retval;
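/* Summary of the register-read path above (descriptive of the code): r0..r14
 * are moved to the DCC with "MCR p14, 0, Rn, c0, c5, 0"; r15 and CPSR/SPSR are
 * first copied into r0.  The debugger then waits for DSCR.DTRTXfull and reads
 * the value back through CPUDBG_DTRTX over the debug AP.
 */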
351 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
352 uint32_t value, int regnum)
354 int retval = ERROR_OK;
355 uint8_t Rd = regnum&0xFF;
356 uint32_t dscr;
357 struct armv7a_common *armv7a = target_to_armv7a(target);
358 struct adiv5_dap *swjdp = armv7a->arm.dap;
360 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
362 /* Check that DCCRX is not full */
363 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
364 armv7a->debug_base + CPUDBG_DSCR, &dscr);
365 if (retval != ERROR_OK)
366 return retval;
367 if (dscr & DSCR_DTR_RX_FULL) {
368 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
369 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
370 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
371 &dscr);
372 if (retval != ERROR_OK)
373 return retval;
376 if (Rd > 17)
377 return retval;
379 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
380 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
381 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
382 armv7a->debug_base + CPUDBG_DTRRX, value);
383 if (retval != ERROR_OK)
384 return retval;
386 if (Rd < 15) {
387 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
388 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
389 &dscr);
391 if (retval != ERROR_OK)
392 return retval;
393 } else if (Rd == 15) {
394 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
395 * then "mov r15, r0"
397 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
398 &dscr);
399 if (retval != ERROR_OK)
400 return retval;
401 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
402 if (retval != ERROR_OK)
403 return retval;
404 } else {
405 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
406 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
408 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
409 &dscr);
410 if (retval != ERROR_OK)
411 return retval;
412 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
413 &dscr);
414 if (retval != ERROR_OK)
415 return retval;
417 /* "Prefetch flush" after modifying execution status in CPSR */
418 if (Rd == 16) {
419 retval = cortex_a8_exec_opcode(target,
420 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
421 &dscr);
422 if (retval != ERROR_OK)
423 return retval;
427 return retval;
430 /* Write to memory mapped registers directly with no cache or mmu handling */
431 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
432 uint32_t address,
433 uint32_t value)
435 int retval;
436 struct armv7a_common *armv7a = target_to_armv7a(target);
437 struct adiv5_dap *swjdp = armv7a->arm.dap;
439 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
441 return retval;
445 * Cortex-A8 implementation of Debug Programmer's Model
447 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
448 * so there's no need to poll for it before executing an instruction.
450 * NOTE that in several of these cases the "stall" mode might be useful.
451 * It'd let us queue a few operations together... prepare/finish might
452 * be the places to enable/disable that mode.
455 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
457 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
460 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
462 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
463 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
464 a8->armv7a_common.debug_ap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
467 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
468 uint32_t *dscr_p)
470 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
471 uint32_t dscr = DSCR_INSTR_COMP;
472 int retval;
474 if (dscr_p)
475 dscr = *dscr_p;
477 /* Wait for DTRRXfull */
478 long long then = timeval_ms();
479 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
480 retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
481 a8->armv7a_common.debug_base + CPUDBG_DSCR,
482 &dscr);
483 if (retval != ERROR_OK)
484 return retval;
485 if (timeval_ms() > then + 1000) {
486 LOG_ERROR("Timeout waiting for read dcc");
487 return ERROR_FAIL;
491 retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
492 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
493 if (retval != ERROR_OK)
494 return retval;
495 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
497 if (dscr_p)
498 *dscr_p = dscr;
500 return retval;
503 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
505 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
506 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
507 uint32_t dscr;
508 int retval;
510 /* set up invariant: INSTR_COMP is set after every DPM operation */
511 long long then = timeval_ms();
512 for (;; ) {
513 retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
514 a8->armv7a_common.debug_base + CPUDBG_DSCR,
515 &dscr);
516 if (retval != ERROR_OK)
517 return retval;
518 if ((dscr & DSCR_INSTR_COMP) != 0)
519 break;
520 if (timeval_ms() > then + 1000) {
521 LOG_ERROR("Timeout waiting for dpm prepare");
522 return ERROR_FAIL;
526 /* this "should never happen" ... */
527 if (dscr & DSCR_DTR_RX_FULL) {
528 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
529 /* Clear DCCRX */
530 retval = cortex_a8_exec_opcode(
531 a8->armv7a_common.arm.target,
532 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
538 return retval;
541 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
543 /* REVISIT what could be done here? */
544 return ERROR_OK;
547 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t data)
550 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
554 retval = cortex_a8_write_dcc(a8, data);
555 if (retval != ERROR_OK)
556 return retval;
558 return cortex_a8_exec_opcode(
559 a8->armv7a_common.arm.target,
560 opcode,
561 &dscr);
564 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
565 uint32_t opcode, uint32_t data)
567 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
568 uint32_t dscr = DSCR_INSTR_COMP;
569 int retval;
571 retval = cortex_a8_write_dcc(a8, data);
572 if (retval != ERROR_OK)
573 return retval;
575 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
576 retval = cortex_a8_exec_opcode(
577 a8->armv7a_common.arm.target,
578 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
579 &dscr);
580 if (retval != ERROR_OK)
581 return retval;
583 /* then the opcode, taking data from R0 */
584 retval = cortex_a8_exec_opcode(
585 a8->armv7a_common.arm.target,
586 opcode,
587 &dscr);
589 return retval;
592 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
594 struct target *target = dpm->arm->target;
595 uint32_t dscr = DSCR_INSTR_COMP;
597 /* "Prefetch flush" after modifying execution status in CPSR */
598 return cortex_a8_exec_opcode(target,
599 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
600 &dscr);
603 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
604 uint32_t opcode, uint32_t *data)
606 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
607 int retval;
608 uint32_t dscr = DSCR_INSTR_COMP;
610 /* the opcode, writing data to DCC */
611 retval = cortex_a8_exec_opcode(
612 a8->armv7a_common.arm.target,
613 opcode,
614 &dscr);
615 if (retval != ERROR_OK)
616 return retval;
618 return cortex_a8_read_dcc(a8, data, &dscr);
622 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
623 uint32_t opcode, uint32_t *data)
625 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
626 uint32_t dscr = DSCR_INSTR_COMP;
627 int retval;
629 /* the opcode, writing data to R0 */
630 retval = cortex_a8_exec_opcode(
631 a8->armv7a_common.arm.target,
632 opcode,
633 &dscr);
634 if (retval != ERROR_OK)
635 return retval;
637 /* write R0 to DCC */
638 retval = cortex_a8_exec_opcode(
639 a8->armv7a_common.arm.target,
640 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
641 &dscr);
642 if (retval != ERROR_OK)
643 return retval;
645 return cortex_a8_read_dcc(a8, data, &dscr);
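/* Illustrative use of the r0-based DPM hooks above (a sketch - the generic
 * arm_dpm layer is assumed to drive them like this); reading SCTLR via r0:
 *
 *	uint32_t sctlr;
 *	retval = dpm->instr_read_data_r0(dpm,
 *			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &sctlr);
 */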
648 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
649 uint32_t addr, uint32_t control)
651 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
652 uint32_t vr = a8->armv7a_common.debug_base;
653 uint32_t cr = a8->armv7a_common.debug_base;
654 int retval;
656 switch (index_t) {
657 case 0 ... 15: /* breakpoints */
658 vr += CPUDBG_BVR_BASE;
659 cr += CPUDBG_BCR_BASE;
660 break;
661 case 16 ... 31: /* watchpoints */
662 vr += CPUDBG_WVR_BASE;
663 cr += CPUDBG_WCR_BASE;
664 index_t -= 16;
665 break;
666 default:
667 return ERROR_FAIL;
669 vr += 4 * index_t;
670 cr += 4 * index_t;
672 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
673 (unsigned) vr, (unsigned) cr);
675 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
676 vr, addr);
677 if (retval != ERROR_OK)
678 return retval;
679 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
680 cr, control);
681 return retval;
684 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
686 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
687 uint32_t cr;
689 switch (index_t) {
690 case 0 ... 15:
691 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
692 break;
693 case 16 ... 31:
694 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
695 index_t -= 16;
696 break;
697 default:
698 return ERROR_FAIL;
700 cr += 4 * index_t;
702 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
704 /* clear control register */
705 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
708 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
710 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
711 int retval;
713 dpm->arm = &a8->armv7a_common.arm;
714 dpm->didr = didr;
716 dpm->prepare = cortex_a8_dpm_prepare;
717 dpm->finish = cortex_a8_dpm_finish;
719 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
720 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
721 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
723 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
724 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
726 dpm->bpwp_enable = cortex_a8_bpwp_enable;
727 dpm->bpwp_disable = cortex_a8_bpwp_disable;
729 retval = arm_dpm_setup(dpm);
730 if (retval == ERROR_OK)
731 retval = arm_dpm_initialize(dpm);
733 return retval;
735 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
737 struct target_list *head;
738 struct target *curr;
740 head = target->head;
741 while (head != (struct target_list *)NULL) {
742 curr = head->target;
743 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
744 return curr;
745 head = head->next;
747 return target;
749 static int cortex_a8_halt(struct target *target);
751 static int cortex_a8_halt_smp(struct target *target)
753 int retval = 0;
754 struct target_list *head;
755 struct target *curr;
756 head = target->head;
757 while (head != (struct target_list *)NULL) {
758 curr = head->target;
759 if ((curr != target) && (curr->state != TARGET_HALTED))
760 retval += cortex_a8_halt(curr);
761 head = head->next;
763 return retval;
766 static int update_halt_gdb(struct target *target)
768 int retval = 0;
769 if (target->gdb_service->core[0] == -1) {
770 target->gdb_service->target = target;
771 target->gdb_service->core[0] = target->coreid;
772 retval += cortex_a8_halt_smp(target);
774 return retval;
778 * Cortex-A8 Run control
781 static int cortex_a8_poll(struct target *target)
783 int retval = ERROR_OK;
784 uint32_t dscr;
785 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
786 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
787 struct adiv5_dap *swjdp = armv7a->arm.dap;
788 enum target_state prev_target_state = target->state;
789 /* gdb toggles to another core as follows: */
790 /*   maint packet J core_id */
791 /*   continue */
792 /* the next poll then triggers a halt event sent to gdb */
793 if ((target->state == TARGET_HALTED) && (target->smp) &&
794 (target->gdb_service) &&
795 (target->gdb_service->target == NULL)) {
796 target->gdb_service->target =
797 get_cortex_a8(target, target->gdb_service->core[1]);
798 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
799 return retval;
801 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
802 armv7a->debug_base + CPUDBG_DSCR, &dscr);
803 if (retval != ERROR_OK)
804 return retval;
805 cortex_a8->cpudbg_dscr = dscr;
807 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
808 if (prev_target_state != TARGET_HALTED) {
809 /* We have a halting debug event */
810 LOG_DEBUG("Target halted");
811 target->state = TARGET_HALTED;
812 if ((prev_target_state == TARGET_RUNNING)
813 || (prev_target_state == TARGET_UNKNOWN)
814 || (prev_target_state == TARGET_RESET)) {
815 retval = cortex_a8_debug_entry(target);
816 if (retval != ERROR_OK)
817 return retval;
818 if (target->smp) {
819 retval = update_halt_gdb(target);
820 if (retval != ERROR_OK)
821 return retval;
823 target_call_event_callbacks(target,
824 TARGET_EVENT_HALTED);
826 if (prev_target_state == TARGET_DEBUG_RUNNING) {
827 LOG_DEBUG(" ");
829 retval = cortex_a8_debug_entry(target);
830 if (retval != ERROR_OK)
831 return retval;
832 if (target->smp) {
833 retval = update_halt_gdb(target);
834 if (retval != ERROR_OK)
835 return retval;
838 target_call_event_callbacks(target,
839 TARGET_EVENT_DEBUG_HALTED);
842 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
843 target->state = TARGET_RUNNING;
844 else {
845 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
846 target->state = TARGET_UNKNOWN;
849 return retval;
852 static int cortex_a8_halt(struct target *target)
854 int retval = ERROR_OK;
855 uint32_t dscr;
856 struct armv7a_common *armv7a = target_to_armv7a(target);
857 struct adiv5_dap *swjdp = armv7a->arm.dap;
860 * Tell the core to be halted by writing DRCR with 0x1
861 * and then wait for the core to be halted.
863 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
864 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
865 if (retval != ERROR_OK)
866 return retval;
869 * enter halting debug mode
871 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
872 armv7a->debug_base + CPUDBG_DSCR, &dscr);
873 if (retval != ERROR_OK)
874 return retval;
876 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
877 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
878 if (retval != ERROR_OK)
879 return retval;
881 long long then = timeval_ms();
882 for (;; ) {
883 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
884 armv7a->debug_base + CPUDBG_DSCR, &dscr);
885 if (retval != ERROR_OK)
886 return retval;
887 if ((dscr & DSCR_CORE_HALTED) != 0)
888 break;
889 if (timeval_ms() > then + 1000) {
890 LOG_ERROR("Timeout waiting for halt");
891 return ERROR_FAIL;
895 target->debug_reason = DBG_REASON_DBGRQ;
897 return ERROR_OK;
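/* Descriptive note on the halt sequence above: the halt request is written to
 * DRCR (DRCR_HALT), halting debug mode is enabled by setting DSCR_HALT_DBG_MODE
 * in DSCR, and the loop then polls DSCR until DSCR_CORE_HALTED appears or one
 * second has elapsed.
 */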
900 static int cortex_a8_internal_restore(struct target *target, int current,
901 uint32_t *address, int handle_breakpoints, int debug_execution)
903 struct armv7a_common *armv7a = target_to_armv7a(target);
904 struct arm *arm = &armv7a->arm;
905 int retval;
906 uint32_t resume_pc;
908 if (!debug_execution)
909 target_free_all_working_areas(target);
911 #if 0
912 if (debug_execution) {
913 /* Disable interrupts */
914 /* We disable interrupts in the PRIMASK register instead of
915 * masking with C_MASKINTS,
916 * This is probably the same issue as Cortex-M3 Errata 377493:
917 * C_MASKINTS in parallel with disabled interrupts can cause
918 * local faults to not be taken. */
919 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
920 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
921 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
923 /* Make sure we are in Thumb mode */
924 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
925 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
926 32) | (1 << 24));
927 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
928 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
930 #endif
932 /* current = 1: continue on current pc, otherwise continue at <address> */
933 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
934 if (!current)
935 resume_pc = *address;
936 else
937 *address = resume_pc;
939 /* Make sure that the ARMv7 gdb thumb fixups do not
940 * kill the return address
942 switch (arm->core_state) {
943 case ARM_STATE_ARM:
944 resume_pc &= 0xFFFFFFFC;
945 break;
946 case ARM_STATE_THUMB:
947 case ARM_STATE_THUMB_EE:
948 /* When the return address is loaded into PC
949 * bit 0 must be 1 to stay in Thumb state
951 resume_pc |= 0x1;
952 break;
953 case ARM_STATE_JAZELLE:
954 LOG_ERROR("How do I resume into Jazelle state??");
955 return ERROR_FAIL;
957 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
958 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
959 arm->pc->dirty = 1;
960 arm->pc->valid = 1;
961 /* restore dpm_mode at system halt */
962 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
963 /* call this now, before restoring context, because it uses cpu
964 * register r0 to restore the cp15 control register */
965 retval = cortex_a8_restore_cp15_control_reg(target);
966 if (retval != ERROR_OK)
967 return retval;
968 retval = cortex_a8_restore_context(target, handle_breakpoints);
969 if (retval != ERROR_OK)
970 return retval;
971 target->debug_reason = DBG_REASON_NOTHALTED;
972 target->state = TARGET_RUNNING;
974 /* registers are now invalid */
975 register_cache_invalidate(arm->core_cache);
977 #if 0
978 /* the front-end may request us not to handle breakpoints */
979 if (handle_breakpoints) {
980 /* Single step past breakpoint at current address */
981 breakpoint = breakpoint_find(target, resume_pc);
982 if (breakpoint) {
983 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
984 cortex_m3_unset_breakpoint(target, breakpoint);
985 cortex_m3_single_step_core(target);
986 cortex_m3_set_breakpoint(target, breakpoint);
990 #endif
991 return retval;
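/* Worked example for the resume_pc fixup above: resuming in Thumb state with
 * pc = 0x00008002 yields 0x00008003 (bit 0 set keeps the core in Thumb when PC
 * is loaded), while resuming in ARM state with pc = 0x00008002 is forced down
 * to the word-aligned 0x00008000.
 */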
994 static int cortex_a8_internal_restart(struct target *target)
996 struct armv7a_common *armv7a = target_to_armv7a(target);
997 struct arm *arm = &armv7a->arm;
998 struct adiv5_dap *swjdp = arm->dap;
999 int retval;
1000 uint32_t dscr;
1002 * Restart core and wait for it to be started. Clear ITRen and sticky
1003 * exception flags: see ARMv7 ARM, C5.9.
1005 * REVISIT: for single stepping, we probably want to
1006 * disable IRQs by default, with optional override...
1009 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1010 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1011 if (retval != ERROR_OK)
1012 return retval;
1014 if ((dscr & DSCR_INSTR_COMP) == 0)
1015 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1017 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1018 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1019 if (retval != ERROR_OK)
1020 return retval;
1022 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1023 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1024 DRCR_CLEAR_EXCEPTIONS);
1025 if (retval != ERROR_OK)
1026 return retval;
1028 long long then = timeval_ms();
1029 for (;; ) {
1030 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1031 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1032 if (retval != ERROR_OK)
1033 return retval;
1034 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1035 break;
1036 if (timeval_ms() > then + 1000) {
1037 LOG_ERROR("Timeout waiting for resume");
1038 return ERROR_FAIL;
1042 target->debug_reason = DBG_REASON_NOTHALTED;
1043 target->state = TARGET_RUNNING;
1045 /* registers are now invalid */
1046 register_cache_invalidate(arm->core_cache);
1048 return ERROR_OK;
1051 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1053 int retval = 0;
1054 struct target_list *head;
1055 struct target *curr;
1056 uint32_t address;
1057 head = target->head;
1058 while (head != (struct target_list *)NULL) {
1059 curr = head->target;
1060 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1061 /* resume current address , not in step mode */
1062 retval += cortex_a8_internal_restore(curr, 1, &address,
1063 handle_breakpoints, 0);
1064 retval += cortex_a8_internal_restart(curr);
1066 head = head->next;
1069 return retval;
1072 static int cortex_a8_resume(struct target *target, int current,
1073 uint32_t address, int handle_breakpoints, int debug_execution)
1075 int retval = 0;
1076 /* dummy resume for smp toggle in order to reduce gdb impact */
1077 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1078 /* simulate a start and halt of target */
1079 target->gdb_service->target = NULL;
1080 target->gdb_service->core[0] = target->gdb_service->core[1];
1081 /* fake resume: at the next poll we switch to the target for core[1], see poll */
1082 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1083 return 0;
1085 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1086 if (target->smp) {
1087 target->gdb_service->core[0] = -1;
1088 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1089 if (retval != ERROR_OK)
1090 return retval;
1092 cortex_a8_internal_restart(target);
1094 if (!debug_execution) {
1095 target->state = TARGET_RUNNING;
1096 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1097 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1098 } else {
1099 target->state = TARGET_DEBUG_RUNNING;
1100 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1101 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1104 return ERROR_OK;
1107 static int cortex_a8_debug_entry(struct target *target)
1109 int i;
1110 uint32_t regfile[16], cpsr, dscr;
1111 int retval = ERROR_OK;
1112 struct working_area *regfile_working_area = NULL;
1113 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1114 struct armv7a_common *armv7a = target_to_armv7a(target);
1115 struct arm *arm = &armv7a->arm;
1116 struct adiv5_dap *swjdp = armv7a->arm.dap;
1117 struct reg *reg;
1119 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1121 /* REVISIT surely we should not re-read DSCR !! */
1122 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1123 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1124 if (retval != ERROR_OK)
1125 return retval;
1127 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1128 * imprecise data aborts get discarded by issuing a Data
1129 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1132 /* Enable the ITR execution once we are in debug mode */
1133 dscr |= DSCR_ITR_EN;
1134 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1135 armv7a->debug_base + CPUDBG_DSCR, dscr);
1136 if (retval != ERROR_OK)
1137 return retval;
1139 /* Examine debug reason */
1140 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1142 /* save address of instruction that triggered the watchpoint? */
1143 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1144 uint32_t wfar;
1146 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1147 armv7a->debug_base + CPUDBG_WFAR,
1148 &wfar);
1149 if (retval != ERROR_OK)
1150 return retval;
1151 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1154 /* REVISIT fast_reg_read is never set ... */
1156 /* Examine target state and mode */
1157 if (cortex_a8->fast_reg_read)
1158 target_alloc_working_area(target, 64, &regfile_working_area);
1160 /* First load registers accessible through the core debug port */
1161 if (!regfile_working_area)
1162 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1163 else {
1164 retval = cortex_a8_read_regs_through_mem(target,
1165 regfile_working_area->address, regfile);
1167 target_free_working_area(target, regfile_working_area);
1168 if (retval != ERROR_OK)
1169 return retval;
1171 /* read Current PSR */
1172 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1173 /* store current cpsr */
1174 if (retval != ERROR_OK)
1175 return retval;
1177 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1179 arm_set_cpsr(arm, cpsr);
1181 /* update cache */
1182 for (i = 0; i <= ARM_PC; i++) {
1183 reg = arm_reg_current(arm, i);
1185 buf_set_u32(reg->value, 0, 32, regfile[i]);
1186 reg->valid = 1;
1187 reg->dirty = 0;
1190 /* Fixup PC Resume Address */
1191 if (cpsr & (1 << 5)) {
1192 /* T bit set for Thumb or ThumbEE state */
1193 regfile[ARM_PC] -= 4;
1194 } else {
1195 /* ARM state */
1196 regfile[ARM_PC] -= 8;
1199 reg = arm->pc;
1200 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1201 reg->dirty = reg->valid;
1204 #if 0
1205 /* TODO, Move this */
1206 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1207 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1208 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1210 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1211 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1213 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1214 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1215 #endif
1217 /* Are we in an exception handler */
1218 /* armv4_5->exception_number = 0; */
1219 if (armv7a->post_debug_entry) {
1220 retval = armv7a->post_debug_entry(target);
1221 if (retval != ERROR_OK)
1222 return retval;
1225 return retval;
1228 static int cortex_a8_post_debug_entry(struct target *target)
1230 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1231 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1232 int retval;
1234 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1235 retval = armv7a->arm.mrc(target, 15,
1236 0, 0, /* op1, op2 */
1237 1, 0, /* CRn, CRm */
1238 &cortex_a8->cp15_control_reg);
1239 if (retval != ERROR_OK)
1240 return retval;
1241 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1242 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1244 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1245 armv7a_identify_cache(target);
1247 if (armv7a->is_armv7r) {
1248 armv7a->armv7a_mmu.mmu_enabled = 0;
1249 } else {
1250 armv7a->armv7a_mmu.mmu_enabled =
1251 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1253 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1254 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1255 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1256 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1257 cortex_a8->curr_mode = armv7a->arm.core_mode;
1259 return ERROR_OK;
1262 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1263 int handle_breakpoints)
1265 struct armv7a_common *armv7a = target_to_armv7a(target);
1266 struct arm *arm = &armv7a->arm;
1267 struct breakpoint *breakpoint = NULL;
1268 struct breakpoint stepbreakpoint;
1269 struct reg *r;
1270 int retval;
1272 if (target->state != TARGET_HALTED) {
1273 LOG_WARNING("target not halted");
1274 return ERROR_TARGET_NOT_HALTED;
1277 /* current = 1: continue on current pc, otherwise continue at <address> */
1278 r = arm->pc;
1279 if (!current)
1280 buf_set_u32(r->value, 0, 32, address);
1281 else
1282 address = buf_get_u32(r->value, 0, 32);
1284 /* The front-end may request us not to handle breakpoints.
1285 * But since Cortex-A8 uses a breakpoint for single-stepping,
1286 * we MUST handle breakpoints.
1288 handle_breakpoints = 1;
1289 if (handle_breakpoints) {
1290 breakpoint = breakpoint_find(target, address);
1291 if (breakpoint)
1292 cortex_a8_unset_breakpoint(target, breakpoint);
1295 /* Setup single step breakpoint */
1296 stepbreakpoint.address = address;
1297 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1298 ? 2 : 4;
1299 stepbreakpoint.type = BKPT_HARD;
1300 stepbreakpoint.set = 0;
1302 /* Break on IVA mismatch */
1303 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1305 target->debug_reason = DBG_REASON_SINGLESTEP;
1307 retval = cortex_a8_resume(target, 1, address, 0, 0);
1308 if (retval != ERROR_OK)
1309 return retval;
1311 long long then = timeval_ms();
1312 while (target->state != TARGET_HALTED) {
1313 retval = cortex_a8_poll(target);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 if (timeval_ms() > then + 1000) {
1317 LOG_ERROR("timeout waiting for target halt");
1318 return ERROR_FAIL;
1322 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1324 target->debug_reason = DBG_REASON_BREAKPOINT;
1326 if (breakpoint)
1327 cortex_a8_set_breakpoint(target, breakpoint, 0);
1329 if (target->state == TARGET_HALTED)
1330 LOG_DEBUG("target stepped");
1332 return ERROR_OK;
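/* Descriptive note on the single-step scheme above: a hardware breakpoint is
 * programmed with match mode 0x4 (IVA mismatch) on the current PC, so the core
 * halts as soon as it executes an instruction at any other address; the
 * temporary breakpoint is then removed and any breakpoint that was originally
 * at that address is re-armed.
 */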
1335 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1337 struct armv7a_common *armv7a = target_to_armv7a(target);
1339 LOG_DEBUG(" ");
1341 if (armv7a->pre_restore_context)
1342 armv7a->pre_restore_context(target);
1344 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1348 * Cortex-A8 Breakpoint and watchpoint functions
1351 /* Setup hardware Breakpoint Register Pair */
1352 static int cortex_a8_set_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint, uint8_t matchmode)
1355 int retval;
1356 int brp_i = 0;
1357 uint32_t control;
1358 uint8_t byte_addr_select = 0x0F;
1359 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1360 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1361 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1363 if (breakpoint->set) {
1364 LOG_WARNING("breakpoint already set");
1365 return ERROR_OK;
1368 if (breakpoint->type == BKPT_HARD) {
1369 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1370 brp_i++;
1371 if (brp_i >= cortex_a8->brp_num) {
1372 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1373 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1375 breakpoint->set = brp_i + 1;
1376 if (breakpoint->length == 2)
1377 byte_addr_select = (3 << (breakpoint->address & 0x02));
1378 control = ((matchmode & 0x7) << 20)
1379 | (byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_i].used = 1;
1382 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1383 brp_list[brp_i].control = control;
1384 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1385 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1386 brp_list[brp_i].value);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1390 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1391 brp_list[brp_i].control);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1395 brp_list[brp_i].control,
1396 brp_list[brp_i].value);
1397 } else if (breakpoint->type == BKPT_SOFT) {
1398 uint8_t code[4];
1399 if (breakpoint->length == 2)
1400 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1401 else
1402 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1403 retval = target_read_memory(target,
1404 breakpoint->address & 0xFFFFFFFE,
1405 breakpoint->length, 1,
1406 breakpoint->orig_instr);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 retval = target_write_memory(target,
1410 breakpoint->address & 0xFFFFFFFE,
1411 breakpoint->length, 1, code);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 breakpoint->set = 0x11; /* Any nice value but 0 */
1417 return ERROR_OK;
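/* Worked example for the BCR control word built above (field layout follows
 * from the shifts in the code: bits [22:20] match mode, bits [8:5] byte
 * address select, bits [2:1] privilege = 0b11, bit 0 enable):
 *   4-byte ARM breakpoint, exact match:   (0 << 20) | (0xF << 5) | (3 << 1) | 1 = 0x1E7
 *   2-byte Thumb breakpoint at offset 2:  (0 << 20) | (0xC << 5) | (3 << 1) | 1 = 0x187
 */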
1420 static int cortex_a8_set_context_breakpoint(struct target *target,
1421 struct breakpoint *breakpoint, uint8_t matchmode)
1423 int retval = ERROR_FAIL;
1424 int brp_i = 0;
1425 uint32_t control;
1426 uint8_t byte_addr_select = 0x0F;
1427 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1428 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1429 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1431 if (breakpoint->set) {
1432 LOG_WARNING("breakpoint already set");
1433 return retval;
1435 /*check available context BRPs*/
1436 while ((brp_i < cortex_a8->brp_num) && (brp_list[brp_i].used ||
1437 (brp_list[brp_i].type != BRP_CONTEXT)))
1438 brp_i++;
1440 if (brp_i >= cortex_a8->brp_num) {
1441 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1442 return ERROR_FAIL;
1445 breakpoint->set = brp_i + 1;
1446 control = ((matchmode & 0x7) << 20)
1447 | (byte_addr_select << 5)
1448 | (3 << 1) | 1;
1449 brp_list[brp_i].used = 1;
1450 brp_list[brp_i].value = (breakpoint->asid);
1451 brp_list[brp_i].control = control;
1452 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1454 brp_list[brp_i].value);
1455 if (retval != ERROR_OK)
1456 return retval;
1457 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1458 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1459 brp_list[brp_i].control);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1463 brp_list[brp_i].control,
1464 brp_list[brp_i].value);
1465 return ERROR_OK;
1469 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1471 int retval = ERROR_FAIL;
1472 int brp_1 = 0; /* holds the contextID pair */
1473 int brp_2 = 0; /* holds the IVA pair */
1474 uint32_t control_CTX, control_IVA;
1475 uint8_t CTX_byte_addr_select = 0x0F;
1476 uint8_t IVA_byte_addr_select = 0x0F;
1477 uint8_t CTX_machmode = 0x03;
1478 uint8_t IVA_machmode = 0x01;
1479 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1480 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1481 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1483 if (breakpoint->set) {
1484 LOG_WARNING("breakpoint already set");
1485 return retval;
1487 /*check available context BRPs*/
1488 while ((brp_1 < cortex_a8->brp_num) && (brp_list[brp_1].used ||
1489 (brp_list[brp_1].type != BRP_CONTEXT)))
1490 brp_1++;
1492 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1493 if (brp_1 >= cortex_a8->brp_num) {
1494 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1495 return ERROR_FAIL;
1498 while ((brp_2 < cortex_a8->brp_num) && (brp_list[brp_2].used ||
1499 (brp_list[brp_2].type != BRP_NORMAL)))
1500 brp_2++;
1502 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1503 if (brp_2 >= cortex_a8->brp_num) {
1504 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1505 return ERROR_FAIL;
1508 breakpoint->set = brp_1 + 1;
1509 breakpoint->linked_BRP = brp_2;
1510 control_CTX = ((CTX_machmode & 0x7) << 20)
1511 | (brp_2 << 16)
1512 | (0 << 14)
1513 | (CTX_byte_addr_select << 5)
1514 | (3 << 1) | 1;
1515 brp_list[brp_1].used = 1;
1516 brp_list[brp_1].value = (breakpoint->asid);
1517 brp_list[brp_1].control = control_CTX;
1518 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1519 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1520 brp_list[brp_1].value);
1521 if (retval != ERROR_OK)
1522 return retval;
1523 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1524 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1525 brp_list[brp_1].control);
1526 if (retval != ERROR_OK)
1527 return retval;
1529 control_IVA = ((IVA_machmode & 0x7) << 20)
1530 | (brp_1 << 16)
1531 | (IVA_byte_addr_select << 5)
1532 | (3 << 1) | 1;
1533 brp_list[brp_2].used = 1;
1534 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1535 brp_list[brp_2].control = control_IVA;
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1538 brp_list[brp_2].value);
1539 if (retval != ERROR_OK)
1540 return retval;
1541 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1542 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1543 brp_list[brp_2].control);
1544 if (retval != ERROR_OK)
1545 return retval;
1547 return ERROR_OK;
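/* Descriptive note on the hybrid breakpoint above: the two BRPs reference each
 * other through the field at bits [19:16] of their control words (the << 16
 * above) - the context BRP holds the ASID, the IVA BRP holds the address, and
 * the pair is intended to fire only when both the address and the ASID match.
 */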
1550 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1552 int retval;
1553 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1554 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1555 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1557 if (!breakpoint->set) {
1558 LOG_WARNING("breakpoint not set");
1559 return ERROR_OK;
1562 if (breakpoint->type == BKPT_HARD) {
1563 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1564 int brp_i = breakpoint->set - 1;
1565 int brp_j = breakpoint->linked_BRP;
1566 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1567 LOG_DEBUG("Invalid BRP number in breakpoint");
1568 return ERROR_OK;
1570 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1571 brp_list[brp_i].control, brp_list[brp_i].value);
1572 brp_list[brp_i].used = 0;
1573 brp_list[brp_i].value = 0;
1574 brp_list[brp_i].control = 0;
1575 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1576 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1577 brp_list[brp_i].control);
1578 if (retval != ERROR_OK)
1579 return retval;
1580 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1581 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1582 brp_list[brp_i].value);
1583 if (retval != ERROR_OK)
1584 return retval;
1585 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
1586 LOG_DEBUG("Invalid BRP number in breakpoint");
1587 return ERROR_OK;
1589 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1590 brp_list[brp_j].control, brp_list[brp_j].value);
1591 brp_list[brp_j].used = 0;
1592 brp_list[brp_j].value = 0;
1593 brp_list[brp_j].control = 0;
1594 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1595 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1596 brp_list[brp_j].control);
1597 if (retval != ERROR_OK)
1598 return retval;
1599 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1600 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1601 brp_list[brp_j].value);
1602 if (retval != ERROR_OK)
1603 return retval;
1604 breakpoint->linked_BRP = 0;
1605 breakpoint->set = 0;
1606 return ERROR_OK;
1608 } else {
1609 int brp_i = breakpoint->set - 1;
1610 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1611 LOG_DEBUG("Invalid BRP number in breakpoint");
1612 return ERROR_OK;
1614 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1615 brp_list[brp_i].control, brp_list[brp_i].value);
1616 brp_list[brp_i].used = 0;
1617 brp_list[brp_i].value = 0;
1618 brp_list[brp_i].control = 0;
1619 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1620 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1621 brp_list[brp_i].control);
1622 if (retval != ERROR_OK)
1623 return retval;
1624 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1625 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1626 brp_list[brp_i].value);
1627 if (retval != ERROR_OK)
1628 return retval;
1629 breakpoint->set = 0;
1630 return ERROR_OK;
1632 } else {
1633 /* restore original instruction (kept in target endianness) */
1634 if (breakpoint->length == 4) {
1635 retval = target_write_memory(target,
1636 breakpoint->address & 0xFFFFFFFE,
1637 4, 1, breakpoint->orig_instr);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 } else {
1641 retval = target_write_memory(target,
1642 breakpoint->address & 0xFFFFFFFE,
1643 2, 1, breakpoint->orig_instr);
1644 if (retval != ERROR_OK)
1645 return retval;
1648 breakpoint->set = 0;
1650 return ERROR_OK;
1653 static int cortex_a8_add_breakpoint(struct target *target,
1654 struct breakpoint *breakpoint)
1656 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1658 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1659 LOG_INFO("no hardware breakpoint available");
1660 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1663 if (breakpoint->type == BKPT_HARD)
1664 cortex_a8->brp_num_available--;
1666 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1669 static int cortex_a8_add_context_breakpoint(struct target *target,
1670 struct breakpoint *breakpoint)
1672 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1674 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1675 LOG_INFO("no hardware breakpoint available");
1676 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1679 if (breakpoint->type == BKPT_HARD)
1680 cortex_a8->brp_num_available--;
1682 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1685 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1686 struct breakpoint *breakpoint)
1688 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1690 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1691 LOG_INFO("no hardware breakpoint available");
1692 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1695 if (breakpoint->type == BKPT_HARD)
1696 cortex_a8->brp_num_available--;
1698 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1702 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1704 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1706 #if 0
1707 /* It is perfectly possible to remove breakpoints while the target is running */
1708 if (target->state != TARGET_HALTED) {
1709 LOG_WARNING("target not halted");
1710 return ERROR_TARGET_NOT_HALTED;
1712 #endif
1714 if (breakpoint->set) {
1715 cortex_a8_unset_breakpoint(target, breakpoint);
1716 if (breakpoint->type == BKPT_HARD)
1717 cortex_a8->brp_num_available++;
1721 return ERROR_OK;
1725 * Cortex-A8 Reset functions
1728 static int cortex_a8_assert_reset(struct target *target)
1730 struct armv7a_common *armv7a = target_to_armv7a(target);
1732 LOG_DEBUG(" ");
1734 /* FIXME when halt is requested, make it work somehow... */
1736 /* Issue some kind of warm reset. */
1737 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1738 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1739 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1740 /* REVISIT handle "pulls" cases, if there's
1741 * hardware that needs them to work.
1743 jtag_add_reset(0, 1);
1744 } else {
1745 LOG_ERROR("%s: how to reset?", target_name(target));
1746 return ERROR_FAIL;
1749 /* registers are now invalid */
1750 register_cache_invalidate(armv7a->arm.core_cache);
1752 target->state = TARGET_RESET;
1754 return ERROR_OK;
1757 static int cortex_a8_deassert_reset(struct target *target)
1759 int retval;
1761 LOG_DEBUG(" ");
1763 /* be certain SRST is off */
1764 jtag_add_reset(0, 0);
1766 retval = cortex_a8_poll(target);
1767 if (retval != ERROR_OK)
1768 return retval;
1770 if (target->reset_halt) {
1771 if (target->state != TARGET_HALTED) {
1772 LOG_WARNING("%s: ran after reset and before halt ...",
1773 target_name(target));
1774 retval = target_halt(target);
1775 if (retval != ERROR_OK)
1776 return retval;
1780 return ERROR_OK;
1783 static int cortex_a8_write_apb_ab_memory(struct target *target,
1784 uint32_t address, uint32_t size,
1785 uint32_t count, const uint8_t *buffer)
1787 /* write memory through APB-AP */
1789 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1790 struct armv7a_common *armv7a = target_to_armv7a(target);
1791 struct arm *arm = &armv7a->arm;
1792 struct adiv5_dap *swjdp = armv7a->arm.dap;
1793 int total_bytes = count * size;
1794 int total_u32;
1795 int start_byte = address & 0x3;
1796 int end_byte = (address + total_bytes) & 0x3;
1797 struct reg *reg;
1798 uint32_t dscr;
1799 uint8_t *tmp_buff = NULL;
1801 if (target->state != TARGET_HALTED) {
1802 LOG_WARNING("target not halted");
1803 return ERROR_TARGET_NOT_HALTED;
1806 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1808 /* Mark register R0 as dirty, as it will be used
1809 * for transferring the data.
1810 * It will be restored automatically when exiting
1811 * debug mode
1813 reg = arm_reg_current(arm, 0);
1814 reg->dirty = true;
1816 /* clear any abort */
1817 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1818 if (retval != ERROR_OK)
1819 return retval;
1821 /* This algorithm comes from either:
1822 * Cortex-A8 TRM Example 12-25
1823 * Cortex-R4 TRM Example 11-26
1824 * (slight differences)
1827 /* The algorithm only copies 32 bit words, so the buffer
1828 * should be expanded to include the words at either end.
1829 * The first and last words will be read first to avoid
1830 * corruption if needed.
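/* Sketch of the fast-DTR write sequence the code below implements: switch DSCR
 * to fast DCC mode, pend "MRC p14, 0, R0, c0, c5, 0" in the ITR and trigger it
 * by writing the word-aligned destination address to DTRRX (so it lands in
 * R0), load the ITR with "STC p14, c5, [R0], #4", then stream the 32-bit words
 * into DTRRX; each DTRRX write re-runs the STC and post-increments R0.
 */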
1832 tmp_buff = (uint8_t *) malloc(total_u32 << 2);
1835 if ((start_byte != 0) && (total_u32 > 1)) {
1836 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1837 * the other bytes in the word.
1839 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1840 if (retval != ERROR_OK)
1841 goto error_free_buff_w;
1844 /* If end of write is not aligned, or the write is less than 4 bytes */
1845 if ((end_byte != 0) ||
1846 ((total_u32 == 1) && (total_bytes != 4))) {
1848 /* Read the last word to avoid corruption during 32 bit write */
1849 int mem_offset = (total_u32-1) << 2; /* byte offset of the last 32 bit word */
1850 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1851 if (retval != ERROR_OK)
1852 goto error_free_buff_w;
1855 /* Copy the write buffer over the top of the temporary buffer */
1856 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1858 /* We now have a 32 bit aligned buffer that can be written */
1860 /* Read DSCR */
1861 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1862 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1863 if (retval != ERROR_OK)
1864 goto error_free_buff_w;
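/* In fast mode the instruction held in the ITR is re-issued on every DTRRX
 * write (and on every DTRTX read), so the block transfer below can run
 * without touching the ITR again.
 */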
1866 /* Set DTR mode to Fast (2) */
1867 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1868 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1869 armv7a->debug_base + CPUDBG_DSCR, dscr);
1870 if (retval != ERROR_OK)
1871 goto error_free_buff_w;
1873 /* Copy the destination address into R0 */
1874 /* - queue the instruction MRC p14, 0, R0, c5, c0 in the ITR; it executes when DTRRX is written below */
1875 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1876 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1877 if (retval != ERROR_OK)
1878 goto error_unset_dtr_w;
1879 /* Write address into DTRRX, which triggers previous instruction */
1880 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1881 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1882 if (retval != ERROR_OK)
1883 goto error_unset_dtr_w;
1885 /* Write the data transfer instruction into the ITR
1886 * (STC p14, c5, [R0], 4)
1888 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1889 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1890 if (retval != ERROR_OK)
1891 goto error_unset_dtr_w;
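/* Each 32 bit word written to DTRRX below re-executes the STC queued in the
 * ITR above, storing the word at [R0] and post-incrementing R0 by 4.
 */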
1893 /* Do the write */
1894 retval = mem_ap_sel_write_buf_u32_noincr(swjdp, armv7a->debug_ap,
1895 tmp_buff, (total_u32)<<2, armv7a->debug_base + CPUDBG_DTRRX);
1896 if (retval != ERROR_OK)
1897 goto error_unset_dtr_w;
1900 /* Switch DTR mode back to non-blocking (0) */
1901 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1902 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1903 armv7a->debug_base + CPUDBG_DSCR, dscr);
1904 if (retval != ERROR_OK)
1905 goto error_unset_dtr_w;
1907 /* Check for sticky abort flags in the DSCR */
1908 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1909 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1910 if (retval != ERROR_OK)
1911 goto error_free_buff_w;
1912 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1913 /* Abort occurred - clear it and exit */
1914 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
1915 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1916 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1917 goto error_free_buff_w;
1920 /* Done */
1921 free(tmp_buff);
1922 return ERROR_OK;
1924 error_unset_dtr_w:
1925 /* Unset DTR mode */
1926 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1927 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1928 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1929 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1930 armv7a->debug_base + CPUDBG_DSCR, dscr);
1931 error_free_buff_w:
1932 LOG_ERROR("error while writing memory through the APB-AP");
1933 free(tmp_buff);
1934 return ERROR_FAIL;
1937 static int cortex_a8_read_apb_ab_memory(struct target *target,
1938 uint32_t address, uint32_t size,
1939 uint32_t count, uint8_t *buffer)
1941 /* read memory through APB-AP */
1943 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1944 struct armv7a_common *armv7a = target_to_armv7a(target);
1945 struct adiv5_dap *swjdp = armv7a->arm.dap;
1946 struct arm *arm = &armv7a->arm;
1947 int total_bytes = count * size;
1948 int total_u32;
1949 int start_byte = address & 0x3;
1950 struct reg *reg;
1951 uint32_t dscr;
1952 uint32_t *tmp_buff;
1953 uint32_t buff32[2];
1954 if (target->state != TARGET_HALTED) {
1955 LOG_WARNING("target not halted");
1956 return ERROR_TARGET_NOT_HALTED;
1959 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1961 /* Because of the word alignment of the offset, the caller's buffer may not
1962 * have room for the full first and last 32 bit words, so allocate a temporary
1963 * buffer to read into, then copy and re-align the data into the caller's buffer.
1965 tmp_buff = malloc(total_u32 * 4);
1966 if (tmp_buff == NULL)
1967 return ERROR_FAIL;
1969 /* Mark register R0 as dirty, as it will be used
1970 * for transferring the data.
1971 * It will be restored automatically when exiting
1972 * debug mode
1974 reg = arm_reg_current(arm, 0);
1975 reg->dirty = true;
1977 /* clear any abort */
1978 retval =
1979 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1980 if (retval != ERROR_OK)
1981 goto error_free_buff_r;
1983 /* Read DSCR */
1984 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1985 armv7a->debug_base + CPUDBG_DSCR, &dscr);
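/* Error checking is deferred: the return code of this read and of the
 * following writes is accumulated in 'retval' and checked once after the
 * combined ITR/DSCR write below.
 */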
1987 /* This algorithm comes from either:
1988 * Cortex-A8 TRM Example 12-24
1989 * Cortex-R4 TRM Example 11-25
1990 * (slight differences)
1993 /* Set DTR access mode to stall mode b01 */
1994 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
1995 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1996 armv7a->debug_base + CPUDBG_DSCR, dscr);
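/* In stall mode, debugger-side accesses to the DTR stall instead of being
 * dropped or flagged as overruns, so the address write below cannot be lost
 * before the MRC consumes it.
 */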
1998 /* Write R0 with value 'address' using write procedure for stall mode */
1999 /* - Write the address for read access into DTRRX */
2000 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2001 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
2002 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
2003 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2006 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
2007 * and the DTR mode setting to fast mode
2008 * in one combined write (since they are adjacent registers)
2010 buff32[0] = ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4);
2011 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2012 buff32[1] = dscr;
2013 /* group the 2 accesses to CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2014 retval += mem_ap_sel_write_buf_u32(swjdp, armv7a->debug_ap, (uint8_t *)buff32, 8,
2015 armv7a->debug_base + CPUDBG_ITR);
2016 if (retval != ERROR_OK)
2017 goto error_unset_dtr_r;
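/* The last word is handled separately because, while the DCC is in fast
 * mode, every DTRTX read re-issues the LDC (see below); the bulk read stops
 * one word early and the final word is only drained after switching back to
 * non-blocking mode, so the core never loads past the requested range.
 */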
2020 /* The last word needs to be handled separately - read all other words in one go.
2022 if (total_u32 > 1) {
2023 /* Read the data - each read of the DTRTX register causes the instruction to be reissued.
2024 * Abort flags are sticky, so they can be checked at the end of the transaction.
2026 * The data is read as 32 bit aligned words, so it may need shifting later.
2028 retval = mem_ap_sel_read_buf_u32_noincr(swjdp, armv7a->debug_ap, (uint8_t *)tmp_buff, (total_u32-1) * 4,
2029 armv7a->debug_base + CPUDBG_DTRTX);
2030 if (retval != ERROR_OK)
2031 goto error_unset_dtr_r;
2034 /* set DTR access mode back to non blocking b00 */
2035 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2036 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2037 armv7a->debug_base + CPUDBG_DSCR, dscr);
2038 if (retval != ERROR_OK)
2039 goto error_free_buff_r;
2041 /* Wait for the final read instruction to finish */
2042 do {
2043 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2044 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2045 if (retval != ERROR_OK)
2046 goto error_free_buff_r;
2047 } while ((dscr & DSCR_INSTR_COMP) == 0);
2050 /* Check for sticky abort flags in the DSCR */
2051 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2052 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2053 if (retval != ERROR_OK)
2054 goto error_free_buff_r;
2055 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2056 /* Abort occurred - clear it and exit */
2057 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
2058 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2059 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2060 goto error_free_buff_r;
2063 /* Read the last word */
2064 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2065 armv7a->debug_base + CPUDBG_DTRTX, &tmp_buff[total_u32 - 1]);
2066 if (retval != ERROR_OK)
2067 goto error_free_buff_r;
2069 /* Copy and align the data into the output buffer */
2070 memcpy(buffer, (uint8_t *)tmp_buff + start_byte, total_bytes);
2072 free(tmp_buff);
2074 /* Done */
2075 return ERROR_OK;
2078 error_unset_dtr_r:
2079 /* Unset DTR mode */
2080 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2081 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2082 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2083 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2084 armv7a->debug_base + CPUDBG_DSCR, dscr);
2085 error_free_buff_r:
2086 LOG_ERROR("error while reading memory through the APB-AP");
2087 free(tmp_buff);
2088 return ERROR_FAIL;
2093 * Cortex-A8 Memory access
2095 * This is the same as for the Cortex-M3, but we must also use the
2096 * correct AP number for every access.
2099 static int cortex_a8_read_phys_memory(struct target *target,
2100 uint32_t address, uint32_t size,
2101 uint32_t count, uint8_t *buffer)
2103 struct armv7a_common *armv7a = target_to_armv7a(target);
2104 struct adiv5_dap *swjdp = armv7a->arm.dap;
2105 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2106 uint8_t apsel = swjdp->apsel;
2107 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2108 address, size, count);
2110 if (count && buffer) {
2112 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2114 /* read memory through AHB-AP */
2116 switch (size) {
2117 case 4:
2118 retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
2119 buffer, 4 * count, address);
2120 break;
2121 case 2:
2122 retval = mem_ap_sel_read_buf_u16(swjdp, armv7a->memory_ap,
2123 buffer, 2 * count, address);
2124 break;
2125 case 1:
2126 retval = mem_ap_sel_read_buf_u8(swjdp, armv7a->memory_ap,
2127 buffer, count, address);
2128 break;
2130 } else {
2132 /* read memory through APB-AP */
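/* The APB-AP path executes load instructions on the core itself through the
 * DCC (see cortex_a8_read_apb_ab_memory), so for a physical read the MMU is
 * switched off below and the address is used untranslated.
 */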
2133 if (!armv7a->is_armv7r) {
2134 /* disable mmu */
2135 retval = cortex_a8_mmu_modify(target, 0);
2136 if (retval != ERROR_OK)
2137 return retval;
2139 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2142 return retval;
2145 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2146 uint32_t size, uint32_t count, uint8_t *buffer)
2148 int enabled = 0;
2149 uint32_t virt, phys;
2150 int retval;
2151 struct armv7a_common *armv7a = target_to_armv7a(target);
2152 struct adiv5_dap *swjdp = armv7a->arm.dap;
2153 uint8_t apsel = swjdp->apsel;
2155 /* cortex_a8 handles unaligned memory access */
2156 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2157 size, count);
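/* Two cases: the AHB-AP sees physical addresses, so with the MMU enabled a
 * virtual address must first be translated via virt2phys; the APB-AP path
 * runs the access on the core with the MMU left on, so the core performs
 * the translation itself.
 */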
2158 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2159 if (!armv7a->is_armv7r) {
2160 retval = cortex_a8_mmu(target, &enabled);
2161 if (retval != ERROR_OK)
2162 return retval;
2165 if (enabled) {
2166 virt = address;
2167 retval = cortex_a8_virt2phys(target, virt, &phys);
2168 if (retval != ERROR_OK)
2169 return retval;
2171 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2172 virt, phys);
2173 address = phys;
2176 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2177 } else {
2178 if (!armv7a->is_armv7r) {
2179 retval = cortex_a8_check_address(target, address);
2180 if (retval != ERROR_OK)
2181 return retval;
2182 /* enable mmu */
2183 retval = cortex_a8_mmu_modify(target, 1);
2184 if (retval != ERROR_OK)
2185 return retval;
2187 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2189 return retval;
2192 static int cortex_a8_write_phys_memory(struct target *target,
2193 uint32_t address, uint32_t size,
2194 uint32_t count, const uint8_t *buffer)
2196 struct armv7a_common *armv7a = target_to_armv7a(target);
2197 struct adiv5_dap *swjdp = armv7a->arm.dap;
2198 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2199 uint8_t apsel = swjdp->apsel;
2201 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2202 size, count);
2204 if (count && buffer) {
2206 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2208 /* write memory through AHB-AP */
2210 switch (size) {
2211 case 4:
2212 retval = mem_ap_sel_write_buf_u32(swjdp, armv7a->memory_ap,
2213 buffer, 4 * count, address);
2214 break;
2215 case 2:
2216 retval = mem_ap_sel_write_buf_u16(swjdp, armv7a->memory_ap,
2217 buffer, 2 * count, address);
2218 break;
2219 case 1:
2220 retval = mem_ap_sel_write_buf_u8(swjdp, armv7a->memory_ap,
2221 buffer, count, address);
2222 break;
2225 } else {
2227 /* write memory through APB-AP */
2228 if (!armv7a->is_armv7r) {
2229 retval = cortex_a8_mmu_modify(target, 0);
2230 if (retval != ERROR_OK)
2231 return retval;
2233 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2238 /* REVISIT this op is generic ARMv7-A/R stuff */
2239 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2240 struct arm_dpm *dpm = armv7a->arm.dpm;
2242 retval = dpm->prepare(dpm);
2243 if (retval != ERROR_OK)
2244 return retval;
2246 /* Cache handling will NOT work with the MMU active: the
2247 * wrong addresses would be invalidated!
2249 * For both ICache and DCache, walk all cache lines in the
2250 * address range. Cortex-A8 has fixed 64 byte line length.
2252 * REVISIT per ARMv7, these may trigger watchpoints ...
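/* The write above may have bypassed the core (AHB-AP), so invalidate any
 * I-cache and D-cache lines covering the range to make sure the core sees
 * the new data.
 */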
2255 /* invalidate I-Cache */
2256 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2257 /* ICIMVAU - Invalidate Cache single entry
2258 * with MVA to PoU
2259 * MCR p15, 0, r0, c7, c5, 1
2261 for (uint32_t cacheline = address & ~(uint32_t)0x3f;
2262 cacheline < address + size * count;
2263 cacheline += 64) {
2264 retval = dpm->instr_write_data_r0(dpm,
2265 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2266 cacheline);
2267 if (retval != ERROR_OK)
2268 return retval;
2272 /* invalidate D-Cache */
2273 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2274 /* DCIMVAC - Invalidate data Cache line
2275 * with MVA to PoC
2276 * MCR p15, 0, r0, c7, c6, 1
2278 for (uint32_t cacheline = address & ~(uint32_t)0x3f;
2279 cacheline < address + size * count;
2280 cacheline += 64) {
2281 retval = dpm->instr_write_data_r0(dpm,
2282 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2283 cacheline);
2284 if (retval != ERROR_OK)
2285 return retval;
2289 /* (void) */ dpm->finish(dpm);
2292 return retval;
2295 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2296 uint32_t size, uint32_t count, const uint8_t *buffer)
2298 int enabled = 0;
2299 uint32_t virt, phys;
2300 int retval;
2301 struct armv7a_common *armv7a = target_to_armv7a(target);
2302 struct adiv5_dap *swjdp = armv7a->arm.dap;
2303 uint8_t apsel = swjdp->apsel;
2304 /* cortex_a8 handles unaligned memory access */
2305 LOG_DEBUG("Writing memory at address 0x%x; size %d; count %d", address,
2306 size, count);
2307 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2309 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2310 count);
2311 if (!armv7a->is_armv7r) {
2312 retval = cortex_a8_mmu(target, &enabled);
2313 if (retval != ERROR_OK)
2314 return retval;
2316 if (enabled) {
2317 virt = address;
2318 retval = cortex_a8_virt2phys(target, virt, &phys);
2319 if (retval != ERROR_OK)
2320 return retval;
2321 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2322 virt,
2323 phys);
2324 address = phys;
2328 retval = cortex_a8_write_phys_memory(target, address, size,
2329 count, buffer);
2330 } else {
2331 if (!armv7a->is_armv7r) {
2332 retval = cortex_a8_check_address(target, address);
2333 if (retval != ERROR_OK)
2334 return retval;
2335 /* enable mmu */
2336 retval = cortex_a8_mmu_modify(target, 1);
2337 if (retval != ERROR_OK)
2338 return retval;
2340 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2342 return retval;
2345 static int cortex_a8_handle_target_request(void *priv)
2347 struct target *target = priv;
2348 struct armv7a_common *armv7a = target_to_armv7a(target);
2349 struct adiv5_dap *swjdp = armv7a->arm.dap;
2350 int retval;
2352 if (!target_was_examined(target))
2353 return ERROR_OK;
2354 if (!target->dbg_msg_enabled)
2355 return ERROR_OK;
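/* Debug messages from the target arrive over the DCC: while the DSCR
 * reports DTRTX full, drain one word at a time and pass it to
 * target_request() for decoding.
 */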
2357 if (target->state == TARGET_RUNNING) {
2358 uint32_t request;
2359 uint32_t dscr;
2360 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2361 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2363 /* check if we have data */
2364 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2365 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2366 armv7a->debug_base + CPUDBG_DTRTX, &request);
2367 if (retval == ERROR_OK) {
2368 target_request(target, request);
2369 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2370 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2375 return ERROR_OK;
2379 * Cortex-A8 target information and configuration
2382 static int cortex_a8_examine_first(struct target *target)
2384 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2385 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2386 struct adiv5_dap *swjdp = armv7a->arm.dap;
2387 int i;
2388 int retval = ERROR_OK;
2389 uint32_t didr, ctypr, ttypr, cpuid;
2391 /* Rather than doing one extra read just to make sure the DAP is
2392 * configured, call ahbap_debugport_init(swjdp) instead.
2394 retval = ahbap_debugport_init(swjdp);
2395 if (retval != ERROR_OK)
2396 return retval;
2398 /* Search for the APB-AP - it is needed for access to debug registers */
2399 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2400 if (retval != ERROR_OK) {
2401 LOG_ERROR("Could not find APB-AP for debug access");
2402 return retval;
2404 /* Search for the AHB-AP */
2405 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2406 if (retval != ERROR_OK) {
2407 /* AHB-AP not found - use APB-AP */
2408 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2409 armv7a->memory_ap_available = false;
2410 } else {
2411 armv7a->memory_ap_available = true;
2415 if (!target->dbgbase_set) {
2416 uint32_t dbgbase;
2417 /* Get ROM Table base */
2418 uint32_t apid;
2419 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2420 if (retval != ERROR_OK)
2421 return retval;
2422 /* Lookup 0x15 -- Processor DAP */
2423 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2424 &armv7a->debug_base);
2425 if (retval != ERROR_OK)
2426 return retval;
2427 } else
2428 armv7a->debug_base = target->dbgbase;
2430 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2431 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2432 if (retval != ERROR_OK)
2433 return retval;
2435 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2436 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2437 if (retval != ERROR_OK) {
2438 LOG_DEBUG("Examine %s failed", "CPUID");
2439 return retval;
2442 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2443 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2444 if (retval != ERROR_OK) {
2445 LOG_DEBUG("Examine %s failed", "CTYPR");
2446 return retval;
2449 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2450 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2451 if (retval != ERROR_OK) {
2452 LOG_DEBUG("Examine %s failed", "TTYPR");
2453 return retval;
2456 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2457 armv7a->debug_base + CPUDBG_DIDR, &didr);
2458 if (retval != ERROR_OK) {
2459 LOG_DEBUG("Examine %s failed", "DIDR");
2460 return retval;
2463 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2464 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2465 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2466 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2468 armv7a->arm.core_type = ARM_MODE_MON;
2469 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2470 if (retval != ERROR_OK)
2471 return retval;
2473 /* Setup Breakpoint Register Pairs */
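/* Per the ARMv7 debug DIDR layout, bits [27:24] give the number of
 * breakpoint register pairs minus one and bits [23:20] the number of
 * context-matching comparators minus one, hence the "+ 1" below.
 */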
2474 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2475 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2476 cortex_a8->brp_num_available = cortex_a8->brp_num;
2477 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2478 /* cortex_a8->brb_enabled = ????; */
2479 for (i = 0; i < cortex_a8->brp_num; i++) {
2480 cortex_a8->brp_list[i].used = 0;
2481 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2482 cortex_a8->brp_list[i].type = BRP_NORMAL;
2483 else
2484 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2485 cortex_a8->brp_list[i].value = 0;
2486 cortex_a8->brp_list[i].control = 0;
2487 cortex_a8->brp_list[i].BRPn = i;
2490 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2492 target_set_examined(target);
2493 return ERROR_OK;
2496 static int cortex_a8_examine(struct target *target)
2498 int retval = ERROR_OK;
2500 /* don't re-probe hardware after each reset */
2501 if (!target_was_examined(target))
2502 retval = cortex_a8_examine_first(target);
2504 /* Configure core debug access */
2505 if (retval == ERROR_OK)
2506 retval = cortex_a8_init_debug_access(target);
2508 return retval;
2512 * Cortex-A8 target creation and initialization
2515 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2516 struct target *target)
2518 /* examine_first() does a bunch of this */
2519 return ERROR_OK;
2522 static int cortex_a8_init_arch_info(struct target *target,
2523 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2525 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2526 struct adiv5_dap *dap = &armv7a->dap;
2528 armv7a->arm.dap = dap;
2530 /* Setup struct cortex_a8_common */
2531 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2532 /* the TAP has no DAP initialized yet - set one up for it */
2533 if (!tap->dap) {
2537 /* prepare JTAG information for the new target */
2538 cortex_a8->jtag_info.tap = tap;
2539 cortex_a8->jtag_info.scann_size = 4;
2541 /* Leave (only) generic DAP stuff for debugport_init() */
2542 dap->jtag_info = &cortex_a8->jtag_info;
2544 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2545 dap->tar_autoincr_block = (1 << 10);
2546 dap->memaccess_tck = 80;
2547 tap->dap = dap;
2548 } else
2549 armv7a->arm.dap = tap->dap;
2551 cortex_a8->fast_reg_read = 0;
2553 /* register arch-specific functions */
2554 armv7a->examine_debug_reason = NULL;
2556 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2558 armv7a->pre_restore_context = NULL;
2560 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2563 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2565 /* REVISIT v7a setup should be in a v7a-specific routine */
2566 armv7a_init_arch_info(target, armv7a);
2567 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2569 return ERROR_OK;
2572 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2574 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2576 cortex_a8->armv7a_common.is_armv7r = false;
2578 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2581 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2583 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2585 cortex_a8->armv7a_common.is_armv7r = true;
2587 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2591 static int cortex_a8_mmu(struct target *target, int *enabled)
2593 if (target->state != TARGET_HALTED) {
2594 LOG_ERROR("%s: target not halted", __func__);
2595 return ERROR_TARGET_INVALID;
2598 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2599 return ERROR_OK;
2602 static int cortex_a8_virt2phys(struct target *target,
2603 uint32_t virt, uint32_t *phys)
2605 int retval = ERROR_FAIL;
2606 struct armv7a_common *armv7a = target_to_armv7a(target);
2607 struct adiv5_dap *swjdp = armv7a->arm.dap;
2608 uint8_t apsel = swjdp->apsel;
2609 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2610 uint32_t ret;
2611 retval = armv7a_mmu_translate_va(target,
2612 virt, &ret);
2613 if (retval != ERROR_OK)
2614 goto done;
2615 *phys = ret;
2616 } else {/* use this method if armv7a->memory_ap is not selected;
2617 * the MMU must be enabled in order to get a correct translation */
2618 retval = cortex_a8_mmu_modify(target, 1);
2619 if (retval != ERROR_OK)
2620 goto done;
2621 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2623 done:
2624 return retval;
2627 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2629 struct target *target = get_current_target(CMD_CTX);
2630 struct armv7a_common *armv7a = target_to_armv7a(target);
2632 return armv7a_handle_cache_info_command(CMD_CTX,
2633 &armv7a->armv7a_mmu.armv7a_cache);
2637 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2639 struct target *target = get_current_target(CMD_CTX);
2640 if (!target_was_examined(target)) {
2641 LOG_ERROR("target not examined yet");
2642 return ERROR_FAIL;
2645 return cortex_a8_init_debug_access(target);
2647 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2649 struct target *target = get_current_target(CMD_CTX);
2650 /* check target is an smp target */
2651 struct target_list *head;
2652 struct target *curr;
2653 head = target->head;
2654 target->smp = 0;
2655 if (head != (struct target_list *)NULL) {
2656 while (head != (struct target_list *)NULL) {
2657 curr = head->target;
2658 curr->smp = 0;
2659 head = head->next;
2661 /* point the gdb service back at this target so the debugger display is consistent */
2662 target->gdb_service->target = target;
2664 return ERROR_OK;
2667 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2669 struct target *target = get_current_target(CMD_CTX);
2670 struct target_list *head;
2671 struct target *curr;
2672 head = target->head;
2673 if (head != (struct target_list *)NULL) {
2674 target->smp = 1;
2675 while (head != (struct target_list *)NULL) {
2676 curr = head->target;
2677 curr->smp = 1;
2678 head = head->next;
2681 return ERROR_OK;
2684 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2686 struct target *target = get_current_target(CMD_CTX);
2687 int retval = ERROR_OK;
2688 struct target_list *head;
2689 head = target->head;
2690 if (head != (struct target_list *)NULL) {
2691 if (CMD_ARGC == 1) {
2692 int coreid = 0;
2693 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2694 if (ERROR_OK != retval)
2695 return retval;
2696 target->gdb_service->core[1] = coreid;
2699 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2700 , target->gdb_service->core[1]);
2702 return ERROR_OK;
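/* Illustrative usage from the OpenOCD command line, via the "cortex_a"
 * command group registered below: "cortex_a smp_gdb" prints the current and
 * requested gdb core ids, while "cortex_a smp_gdb 1" requests that gdb be
 * switched to core 1.
 */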
2705 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2707 .name = "cache_info",
2708 .handler = cortex_a8_handle_cache_info_command,
2709 .mode = COMMAND_EXEC,
2710 .help = "display information about target caches",
2711 .usage = "",
2714 .name = "dbginit",
2715 .handler = cortex_a8_handle_dbginit_command,
2716 .mode = COMMAND_EXEC,
2717 .help = "Initialize core debug",
2718 .usage = "",
2720 { .name = "smp_off",
2721 .handler = cortex_a8_handle_smp_off_command,
2722 .mode = COMMAND_EXEC,
2723 .help = "Stop smp handling",
2724 .usage = "",},
2726 .name = "smp_on",
2727 .handler = cortex_a8_handle_smp_on_command,
2728 .mode = COMMAND_EXEC,
2729 .help = "Restart smp handling",
2730 .usage = "",
2733 .name = "smp_gdb",
2734 .handler = cortex_a8_handle_smp_gdb_command,
2735 .mode = COMMAND_EXEC,
2736 .help = "display/set the current core presented to gdb",
2737 .usage = "",
2741 COMMAND_REGISTRATION_DONE
2743 static const struct command_registration cortex_a8_command_handlers[] = {
2745 .chain = arm_command_handlers,
2748 .chain = armv7a_command_handlers,
2751 .name = "cortex_a",
2752 .mode = COMMAND_ANY,
2753 .help = "Cortex-A command group",
2754 .usage = "",
2755 .chain = cortex_a8_exec_command_handlers,
2757 COMMAND_REGISTRATION_DONE
2760 struct target_type cortexa8_target = {
2761 .name = "cortex_a",
2762 .deprecated_name = "cortex_a8",
2764 .poll = cortex_a8_poll,
2765 .arch_state = armv7a_arch_state,
2767 .target_request_data = NULL,
2769 .halt = cortex_a8_halt,
2770 .resume = cortex_a8_resume,
2771 .step = cortex_a8_step,
2773 .assert_reset = cortex_a8_assert_reset,
2774 .deassert_reset = cortex_a8_deassert_reset,
2775 .soft_reset_halt = NULL,
2777 /* REVISIT allow exporting VFP3 registers ... */
2778 .get_gdb_reg_list = arm_get_gdb_reg_list,
2780 .read_memory = cortex_a8_read_memory,
2781 .write_memory = cortex_a8_write_memory,
2783 .checksum_memory = arm_checksum_memory,
2784 .blank_check_memory = arm_blank_check_memory,
2786 .run_algorithm = armv4_5_run_algorithm,
2788 .add_breakpoint = cortex_a8_add_breakpoint,
2789 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2790 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2791 .remove_breakpoint = cortex_a8_remove_breakpoint,
2792 .add_watchpoint = NULL,
2793 .remove_watchpoint = NULL,
2795 .commands = cortex_a8_command_handlers,
2796 .target_create = cortex_a8_target_create,
2797 .init_target = cortex_a8_init_target,
2798 .examine = cortex_a8_examine,
2800 .read_phys_memory = cortex_a8_read_phys_memory,
2801 .write_phys_memory = cortex_a8_write_phys_memory,
2802 .mmu = cortex_a8_mmu,
2803 .virt2phys = cortex_a8_virt2phys,
2806 static const struct command_registration cortex_r4_exec_command_handlers[] = {
2808 .name = "cache_info",
2809 .handler = cortex_a8_handle_cache_info_command,
2810 .mode = COMMAND_EXEC,
2811 .help = "display information about target caches",
2812 .usage = "",
2815 .name = "dbginit",
2816 .handler = cortex_a8_handle_dbginit_command,
2817 .mode = COMMAND_EXEC,
2818 .help = "Initialize core debug",
2819 .usage = "",
2822 COMMAND_REGISTRATION_DONE
2824 static const struct command_registration cortex_r4_command_handlers[] = {
2826 .chain = arm_command_handlers,
2829 .chain = armv7a_command_handlers,
2832 .name = "cortex_r4",
2833 .mode = COMMAND_ANY,
2834 .help = "Cortex-R4 command group",
2835 .usage = "",
2836 .chain = cortex_r4_exec_command_handlers,
2838 COMMAND_REGISTRATION_DONE
2841 struct target_type cortexr4_target = {
2842 .name = "cortex_r4",
2844 .poll = cortex_a8_poll,
2845 .arch_state = armv7a_arch_state,
2847 .target_request_data = NULL,
2849 .halt = cortex_a8_halt,
2850 .resume = cortex_a8_resume,
2851 .step = cortex_a8_step,
2853 .assert_reset = cortex_a8_assert_reset,
2854 .deassert_reset = cortex_a8_deassert_reset,
2855 .soft_reset_halt = NULL,
2857 /* REVISIT allow exporting VFP3 registers ... */
2858 .get_gdb_reg_list = arm_get_gdb_reg_list,
2860 .read_memory = cortex_a8_read_memory,
2861 .write_memory = cortex_a8_write_memory,
2863 .checksum_memory = arm_checksum_memory,
2864 .blank_check_memory = arm_blank_check_memory,
2866 .run_algorithm = armv4_5_run_algorithm,
2868 .add_breakpoint = cortex_a8_add_breakpoint,
2869 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2870 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2871 .remove_breakpoint = cortex_a8_remove_breakpoint,
2872 .add_watchpoint = NULL,
2873 .remove_watchpoint = NULL,
2875 .commands = cortex_r4_command_handlers,
2876 .target_create = cortex_r4_target_create,
2877 .init_target = cortex_a8_init_target,
2878 .examine = cortex_a8_examine,