cortex a8: add error propagation for poll/resume
[openocd/cortex.git] / src / target / cortex_a8.c
blob ee79d63f793c95c3eac2a1fb0951ba40244676f9
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
76 * Cortex-A8 basic debug access; very low level, assumes state is saved
78 static int cortex_a8_init_debug_access(struct target *target)
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
83 int retval;
84 uint32_t dummy;
86 LOG_DEBUG(" ");
88 /* Unlocking the debug registers for modification */
89 /* The debugport might be uninitialised so try twice */
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
96 if (retval != ERROR_OK)
97 return retval;
98 /* Clear Sticky Power Down status Bit in PRSR to enable access to
99 the registers in the Core Power Domain */
100 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
101 if (retval != ERROR_OK)
102 return retval;
104 /* Enabling of instruction execution in debug mode is done in debug_entry code */
106 /* Resync breakpoint registers */
108 /* Since this is likely called from init or reset, update target state information */
109 retval = cortex_a8_poll(target);
111 return retval;
114 /* To reduce needless round-trips, pass in a pointer to the current
115 * DSCR value. Initialize it to zero if you just need to know the
116 * value on return from this function; or DSCR_INSTR_COMP if you
117 * happen to know that no instruction is pending.
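 *
 * Illustrative usage (a sketch only, not called anywhere in this file;
 * it assumes ARMV4_5_NOP from arm_opcodes.h):
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;	// caller knows nothing is pending
 *	retval = cortex_a8_exec_opcode(target, ARMV4_5_NOP, &dscr);
 *	// reusing &dscr on the next call avoids an extra DSCR read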
119 static int cortex_a8_exec_opcode(struct target *target,
120 uint32_t opcode, uint32_t *dscr_p)
122 uint32_t dscr;
123 int retval;
124 struct armv7a_common *armv7a = target_to_armv7a(target);
125 struct adiv5_dap *swjdp = &armv7a->dap;
127 dscr = dscr_p ? *dscr_p : 0;
129 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
131 /* Wait for InstrCompl bit to be set */
132 while ((dscr & DSCR_INSTR_COMP) == 0)
134 retval = mem_ap_read_atomic_u32(swjdp,
135 armv7a->debug_base + CPUDBG_DSCR, &dscr);
136 if (retval != ERROR_OK)
138 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
139 return retval;
143 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
147 retval = mem_ap_read_atomic_u32(swjdp,
148 armv7a->debug_base + CPUDBG_DSCR, &dscr);
149 if (retval != ERROR_OK)
151 LOG_ERROR("Could not read DSCR register");
152 return retval;
155 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
157 if (dscr_p)
158 *dscr_p = dscr;
160 return retval;
163 /**************************************************************************
164 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
165 This can cause problems with MMU active.
166 **************************************************************************/
167 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
168 uint32_t * regfile)
170 int retval = ERROR_OK;
171 struct armv7a_common *armv7a = target_to_armv7a(target);
172 struct adiv5_dap *swjdp = &armv7a->dap;
174 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
175 cortex_a8_dap_write_coreregister_u32(target, address, 0);
176 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
177 dap_ap_select(swjdp, swjdp_memoryap);
178 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
179 dap_ap_select(swjdp, swjdp_debugap);
181 return retval;
184 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
185 uint32_t *value, int regnum)
187 int retval = ERROR_OK;
188 uint8_t reg = regnum&0xFF;
189 uint32_t dscr = 0;
190 struct armv7a_common *armv7a = target_to_armv7a(target);
191 struct adiv5_dap *swjdp = &armv7a->dap;
193 if (reg > 17)
194 return retval;
196 if (reg < 15)
198 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
199 cortex_a8_exec_opcode(target,
200 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
201 &dscr);
203 else if (reg == 15)
205 /* "MOV r0, r15"; then move r0 to DCCTX */
206 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
207 cortex_a8_exec_opcode(target,
208 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
209 &dscr);
211 else
213 /* "MRS r0, CPSR" or "MRS r0, SPSR"
214 * then move r0 to DCCTX
216 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
217 cortex_a8_exec_opcode(target,
218 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
219 &dscr);
222 /* Wait for DTRTXfull, then read DTRTX */
223 while ((dscr & DSCR_DTR_TX_FULL) == 0)
225 retval = mem_ap_read_atomic_u32(swjdp,
226 armv7a->debug_base + CPUDBG_DSCR, &dscr);
229 retval = mem_ap_read_atomic_u32(swjdp,
230 armv7a->debug_base + CPUDBG_DTRTX, value);
231 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
233 return retval;
236 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
237 uint32_t value, int regnum)
239 int retval = ERROR_OK;
240 uint8_t Rd = regnum&0xFF;
241 uint32_t dscr;
242 struct armv7a_common *armv7a = target_to_armv7a(target);
243 struct adiv5_dap *swjdp = &armv7a->dap;
245 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
247 /* Check that DCCRX is not full */
248 retval = mem_ap_read_atomic_u32(swjdp,
249 armv7a->debug_base + CPUDBG_DSCR, &dscr);
250 if (dscr & DSCR_DTR_RX_FULL)
252 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
253 /* Clear DCCRX by reading it: MRC(p14, 0, R0, c0, c5, 0), opcode 0xEE100E15 */
254 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
255 &dscr);
258 if (Rd > 17)
259 return retval;
261 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
262 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
263 retval = mem_ap_write_u32(swjdp,
264 armv7a->debug_base + CPUDBG_DTRRX, value);
266 if (Rd < 15)
268 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
269 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
270 &dscr);
272 else if (Rd == 15)
274 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
275 * then "mov r15, r0"
277 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
278 &dscr);
279 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
281 else
283 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
284 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
286 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
287 &dscr);
288 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
289 &dscr);
291 /* "Prefetch flush" after modifying execution status in CPSR */
292 if (Rd == 16)
293 cortex_a8_exec_opcode(target,
294 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
295 &dscr);
298 return retval;
301 /* Write to memory mapped registers directly with no cache or mmu handling */
302 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
304 int retval;
305 struct armv7a_common *armv7a = target_to_armv7a(target);
306 struct adiv5_dap *swjdp = &armv7a->dap;
308 retval = mem_ap_write_atomic_u32(swjdp, address, value);
310 return retval;
314 * Cortex-A8 implementation of Debug Programmer's Model
316 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
317 * so there's no need to poll for it before executing an instruction.
319 * NOTE that in several of these cases the "stall" mode might be useful.
320 * It'd let us queue a few operations together... prepare/finish might
321 * be the places to enable/disable that mode.
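 *
 * Callers bracket DPM work with prepare()/finish(), as the memory write
 * path later in this file does.  A minimal sketch of that calling
 * pattern (illustrative only):
 *
 *	retval = dpm->prepare(dpm);		// establishes the INSTR_COMP invariant
 *	if (retval == ERROR_OK)
 *		retval = dpm->instr_write_data_r0(dpm, opcode, data);
 *	dpm->finish(dpm);			// currently a no-op hook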
324 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
326 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
329 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
331 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
332 return mem_ap_write_u32(&a8->armv7a_common.dap,
333 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
336 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
337 uint32_t *dscr_p)
339 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
340 uint32_t dscr = DSCR_INSTR_COMP;
341 int retval;
343 if (dscr_p)
344 dscr = *dscr_p;
346 /* Wait for DTRTXfull */
347 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
348 retval = mem_ap_read_atomic_u32(swjdp,
349 a8->armv7a_common.debug_base + CPUDBG_DSCR,
350 &dscr);
353 retval = mem_ap_read_atomic_u32(swjdp,
354 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
355 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
357 if (dscr_p)
358 *dscr_p = dscr;
360 return retval;
363 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
365 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
366 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
367 uint32_t dscr;
368 int retval;
370 /* set up invariant: INSTR_COMP is set after every DPM operation */
371 long long then = timeval_ms();
372 for (;;)
374 retval = mem_ap_read_atomic_u32(swjdp,
375 a8->armv7a_common.debug_base + CPUDBG_DSCR,
376 &dscr);
377 if (retval != ERROR_OK)
378 return retval;
379 if ((dscr & DSCR_INSTR_COMP) != 0)
380 break;
381 if (timeval_ms() > then + 1000)
383 LOG_ERROR("Timeout waiting for dpm prepare");
384 return ERROR_FAIL;
388 /* this "should never happen" ... */
389 if (dscr & DSCR_DTR_RX_FULL) {
390 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
391 /* Clear DCCRX */
392 retval = cortex_a8_exec_opcode(
393 a8->armv7a_common.armv4_5_common.target,
394 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
395 &dscr);
398 return retval;
401 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
403 /* REVISIT what could be done here? */
404 return ERROR_OK;
407 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
408 uint32_t opcode, uint32_t data)
410 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
411 int retval;
412 uint32_t dscr = DSCR_INSTR_COMP;
414 retval = cortex_a8_write_dcc(a8, data);
416 return cortex_a8_exec_opcode(
417 a8->armv7a_common.armv4_5_common.target,
418 opcode,
419 &dscr);
422 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
423 uint32_t opcode, uint32_t data)
425 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
426 uint32_t dscr = DSCR_INSTR_COMP;
427 int retval;
429 retval = cortex_a8_write_dcc(a8, data);
431 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
432 retval = cortex_a8_exec_opcode(
433 a8->armv7a_common.armv4_5_common.target,
434 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
435 &dscr);
437 /* then the opcode, taking data from R0 */
438 retval = cortex_a8_exec_opcode(
439 a8->armv7a_common.armv4_5_common.target,
440 opcode,
441 &dscr);
443 return retval;
446 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
448 struct target *target = dpm->arm->target;
449 uint32_t dscr = DSCR_INSTR_COMP;
451 /* "Prefetch flush" after modifying execution status in CPSR */
452 return cortex_a8_exec_opcode(target,
453 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
454 &dscr);
457 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
458 uint32_t opcode, uint32_t *data)
460 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
461 int retval;
462 uint32_t dscr = DSCR_INSTR_COMP;
464 /* the opcode, writing data to DCC */
465 retval = cortex_a8_exec_opcode(
466 a8->armv7a_common.armv4_5_common.target,
467 opcode,
468 &dscr);
470 return cortex_a8_read_dcc(a8, data, &dscr);
474 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
475 uint32_t opcode, uint32_t *data)
477 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
478 uint32_t dscr = DSCR_INSTR_COMP;
479 int retval;
481 /* the opcode, writing data to R0 */
482 retval = cortex_a8_exec_opcode(
483 a8->armv7a_common.armv4_5_common.target,
484 opcode,
485 &dscr);
487 /* write R0 to DCC */
488 retval = cortex_a8_exec_opcode(
489 a8->armv7a_common.armv4_5_common.target,
490 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
491 &dscr);
493 return cortex_a8_read_dcc(a8, data, &dscr);
496 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
497 uint32_t addr, uint32_t control)
499 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
500 uint32_t vr = a8->armv7a_common.debug_base;
501 uint32_t cr = a8->armv7a_common.debug_base;
502 int retval;
504 switch (index_t) {
505 case 0 ... 15: /* breakpoints */
506 vr += CPUDBG_BVR_BASE;
507 cr += CPUDBG_BCR_BASE;
508 break;
509 case 16 ... 31: /* watchpoints */
510 vr += CPUDBG_WVR_BASE;
511 cr += CPUDBG_WCR_BASE;
512 index_t -= 16;
513 break;
514 default:
515 return ERROR_FAIL;
517 vr += 4 * index_t;
518 cr += 4 * index_t;
520 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
521 (unsigned) vr, (unsigned) cr);
523 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
524 vr, addr);
525 if (retval != ERROR_OK)
526 return retval;
527 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
528 cr, control);
529 return retval;
532 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
534 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
535 uint32_t cr;
537 switch (index_t) {
538 case 0 ... 15:
539 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
540 break;
541 case 16 ... 31:
542 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
543 index_t -= 16;
544 break;
545 default:
546 return ERROR_FAIL;
548 cr += 4 * index_t;
550 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
552 /* clear control register */
553 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
556 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
558 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
559 int retval;
561 dpm->arm = &a8->armv7a_common.armv4_5_common;
562 dpm->didr = didr;
564 dpm->prepare = cortex_a8_dpm_prepare;
565 dpm->finish = cortex_a8_dpm_finish;
567 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
568 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
569 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
571 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
572 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
574 dpm->bpwp_enable = cortex_a8_bpwp_enable;
575 dpm->bpwp_disable = cortex_a8_bpwp_disable;
577 retval = arm_dpm_setup(dpm);
578 if (retval == ERROR_OK)
579 retval = arm_dpm_initialize(dpm);
581 return retval;
586 * Cortex-A8 Run control
589 static int cortex_a8_poll(struct target *target)
591 int retval = ERROR_OK;
592 uint32_t dscr;
593 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
594 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
595 struct adiv5_dap *swjdp = &armv7a->dap;
596 enum target_state prev_target_state = target->state;
597 uint8_t saved_apsel = dap_ap_get_select(swjdp);
599 dap_ap_select(swjdp, swjdp_debugap);
600 retval = mem_ap_read_atomic_u32(swjdp,
601 armv7a->debug_base + CPUDBG_DSCR, &dscr);
602 if (retval != ERROR_OK)
604 dap_ap_select(swjdp, saved_apsel);
605 return retval;
607 cortex_a8->cpudbg_dscr = dscr;
609 if ((dscr & 0x3) == 0x3)
611 if (prev_target_state != TARGET_HALTED)
613 /* We have a halting debug event */
614 LOG_DEBUG("Target halted");
615 target->state = TARGET_HALTED;
616 if ((prev_target_state == TARGET_RUNNING)
617 || (prev_target_state == TARGET_RESET))
619 retval = cortex_a8_debug_entry(target);
620 if (retval != ERROR_OK)
621 return retval;
623 target_call_event_callbacks(target,
624 TARGET_EVENT_HALTED);
626 if (prev_target_state == TARGET_DEBUG_RUNNING)
628 LOG_DEBUG(" ");
630 retval = cortex_a8_debug_entry(target);
631 if (retval != ERROR_OK)
632 return retval;
634 target_call_event_callbacks(target,
635 TARGET_EVENT_DEBUG_HALTED);
639 else if ((dscr & 0x3) == 0x2)
641 target->state = TARGET_RUNNING;
643 else
645 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
646 target->state = TARGET_UNKNOWN;
649 dap_ap_select(swjdp, saved_apsel);
651 return retval;
654 static int cortex_a8_halt(struct target *target)
656 int retval = ERROR_OK;
657 uint32_t dscr;
658 struct armv7a_common *armv7a = target_to_armv7a(target);
659 struct adiv5_dap *swjdp = &armv7a->dap;
660 uint8_t saved_apsel = dap_ap_get_select(swjdp);
661 dap_ap_select(swjdp, swjdp_debugap);
664 * Request the core to halt by writing DRCR with 0x1,
665 * then wait for it to enter the halted state.
667 retval = mem_ap_write_atomic_u32(swjdp,
668 armv7a->debug_base + CPUDBG_DRCR, 0x1);
669 if (retval != ERROR_OK)
670 goto out;
673 * enter halting debug mode
675 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
676 if (retval != ERROR_OK)
677 goto out;
679 retval = mem_ap_write_atomic_u32(swjdp,
680 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
681 if (retval != ERROR_OK)
682 goto out;
684 long long then = timeval_ms();
685 for (;;)
687 retval = mem_ap_read_atomic_u32(swjdp,
688 armv7a->debug_base + CPUDBG_DSCR, &dscr);
689 if (retval != ERROR_OK)
690 goto out;
691 if ((dscr & DSCR_CORE_HALTED) != 0)
693 break;
695 if (timeval_ms() > then + 1000)
697 LOG_ERROR("Timeout waiting for halt");
698 return ERROR_FAIL;
702 target->debug_reason = DBG_REASON_DBGRQ;
704 out:
705 dap_ap_select(swjdp, saved_apsel);
706 return retval;
709 static int cortex_a8_resume(struct target *target, int current,
710 uint32_t address, int handle_breakpoints, int debug_execution)
712 struct armv7a_common *armv7a = target_to_armv7a(target);
713 struct arm *armv4_5 = &armv7a->armv4_5_common;
714 struct adiv5_dap *swjdp = &armv7a->dap;
715 int retval;
717 // struct breakpoint *breakpoint = NULL;
718 uint32_t resume_pc, dscr;
720 uint8_t saved_apsel = dap_ap_get_select(swjdp);
721 dap_ap_select(swjdp, swjdp_debugap);
723 if (!debug_execution)
724 target_free_all_working_areas(target);
726 #if 0
727 if (debug_execution)
729 /* Disable interrupts */
730 /* We disable interrupts in the PRIMASK register instead of
731 * masking with C_MASKINTS,
732 * This is probably the same issue as Cortex-M3 Errata 377493:
733 * C_MASKINTS in parallel with disabled interrupts can cause
734 * local faults to not be taken. */
735 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
736 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
737 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
739 /* Make sure we are in Thumb mode */
740 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
741 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
742 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
743 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
745 #endif
747 /* current = 1: continue on current pc, otherwise continue at <address> */
748 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
749 if (!current)
750 resume_pc = address;
752 /* Make sure that the ARMv7 GDB Thumb fixups do not
753 * kill the return address
755 switch (armv4_5->core_state)
757 case ARM_STATE_ARM:
758 resume_pc &= 0xFFFFFFFC;
759 break;
760 case ARM_STATE_THUMB:
761 case ARM_STATE_THUMB_EE:
762 /* When the return address is loaded into PC
763 * bit 0 must be 1 to stay in Thumb state
765 resume_pc |= 0x1;
766 break;
767 case ARM_STATE_JAZELLE:
768 LOG_ERROR("How do I resume into Jazelle state??");
769 return ERROR_FAIL;
771 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
772 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
773 armv4_5->pc->dirty = 1;
774 armv4_5->pc->valid = 1;
776 cortex_a8_restore_context(target, handle_breakpoints);
778 #if 0
779 /* the front-end may request us not to handle breakpoints */
780 if (handle_breakpoints)
782 /* Single step past breakpoint at current address */
783 if ((breakpoint = breakpoint_find(target, resume_pc)))
785 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
786 cortex_m3_unset_breakpoint(target, breakpoint);
787 cortex_m3_single_step_core(target);
788 cortex_m3_set_breakpoint(target, breakpoint);
792 #endif
793 /* Restart core and wait for it to be started
794 * NOTE: this clears DSCR_ITR_EN and other bits.
796 * REVISIT: for single stepping, we probably want to
797 * disable IRQs by default, with optional override...
799 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
800 if (retval != ERROR_OK)
801 return retval;
803 long long then = timeval_ms();
804 for (;;)
806 retval = mem_ap_read_atomic_u32(swjdp,
807 armv7a->debug_base + CPUDBG_DSCR, &dscr);
808 if (retval != ERROR_OK)
809 return retval;
810 if ((dscr & DSCR_CORE_RESTARTED) != 0)
811 break;
812 if (timeval_ms() > then + 1000)
814 LOG_ERROR("Timeout waiting for resume");
815 return ERROR_FAIL;
819 target->debug_reason = DBG_REASON_NOTHALTED;
820 target->state = TARGET_RUNNING;
822 /* registers are now invalid */
823 register_cache_invalidate(armv4_5->core_cache);
825 if (!debug_execution)
827 target->state = TARGET_RUNNING;
828 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
829 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
831 else
833 target->state = TARGET_DEBUG_RUNNING;
834 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
835 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
838 dap_ap_select(swjdp, saved_apsel);
840 return ERROR_OK;
843 static int cortex_a8_debug_entry(struct target *target)
845 int i;
846 uint32_t regfile[16], cpsr, dscr;
847 int retval = ERROR_OK;
848 struct working_area *regfile_working_area = NULL;
849 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
850 struct armv7a_common *armv7a = target_to_armv7a(target);
851 struct arm *armv4_5 = &armv7a->armv4_5_common;
852 struct adiv5_dap *swjdp = &armv7a->dap;
853 struct reg *reg;
855 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
857 /* REVISIT surely we should not re-read DSCR !! */
858 retval = mem_ap_read_atomic_u32(swjdp,
859 armv7a->debug_base + CPUDBG_DSCR, &dscr);
860 if (retval != ERROR_OK)
861 return retval;
863 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
864 * imprecise data aborts get discarded by issuing a Data
865 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
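 *
 * A sketch of how that barrier could be issued once ITR execution has
 * been enabled just below (illustrative only, not wired in):
 *
 *	uint32_t dsb_dscr = DSCR_INSTR_COMP;
 *	retval = cortex_a8_exec_opcode(target,
 *			ARMV4_5_MCR(15, 0, 0, 7, 10, 4), &dsb_dscr);
 *	if (retval != ERROR_OK)
 *		return retval;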
868 /* Enable the ITR execution once we are in debug mode */
869 dscr |= DSCR_ITR_EN;
870 retval = mem_ap_write_atomic_u32(swjdp,
871 armv7a->debug_base + CPUDBG_DSCR, dscr);
872 if (retval != ERROR_OK)
873 return retval;
875 /* Examine debug reason */
876 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
878 /* save address of instruction that triggered the watchpoint? */
879 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
880 uint32_t wfar;
882 retval = mem_ap_read_atomic_u32(swjdp,
883 armv7a->debug_base + CPUDBG_WFAR,
884 &wfar);
885 if (retval != ERROR_OK)
886 return retval;
887 arm_dpm_report_wfar(&armv7a->dpm, wfar);
890 /* REVISIT fast_reg_read is never set ... */
892 /* Examine target state and mode */
893 if (cortex_a8->fast_reg_read)
894 target_alloc_working_area(target, 64, &regfile_working_area);
896 /* First load registers accessible through the core debug port */
897 if (!regfile_working_area)
899 retval = arm_dpm_read_current_registers(&armv7a->dpm);
901 else
903 dap_ap_select(swjdp, swjdp_memoryap);
904 cortex_a8_read_regs_through_mem(target,
905 regfile_working_area->address, regfile);
906 dap_ap_select(swjdp, swjdp_memoryap);
907 target_free_working_area(target, regfile_working_area);
909 /* read Current PSR */
910 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
911 dap_ap_select(swjdp, swjdp_debugap);
912 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
914 arm_set_cpsr(armv4_5, cpsr);
916 /* update cache */
917 for (i = 0; i <= ARM_PC; i++)
919 reg = arm_reg_current(armv4_5, i);
921 buf_set_u32(reg->value, 0, 32, regfile[i]);
922 reg->valid = 1;
923 reg->dirty = 0;
926 /* Fixup PC Resume Address */
927 if (cpsr & (1 << 5))
929 // T bit set for Thumb or ThumbEE state
930 regfile[ARM_PC] -= 4;
932 else
934 // ARM state
935 regfile[ARM_PC] -= 8;
938 reg = armv4_5->pc;
939 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
940 reg->dirty = reg->valid;
943 #if 0
944 /* TODO, Move this */
945 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
946 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
947 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
949 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
950 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
952 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
953 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
954 #endif
956 /* Are we in an exception handler */
957 // armv4_5->exception_number = 0;
958 if (armv7a->post_debug_entry)
959 armv7a->post_debug_entry(target);
961 return retval;
964 static void cortex_a8_post_debug_entry(struct target *target)
966 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
967 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
968 int retval;
970 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
971 retval = armv7a->armv4_5_common.mrc(target, 15,
972 0, 0, /* op1, op2 */
973 1, 0, /* CRn, CRm */
974 &cortex_a8->cp15_control_reg);
975 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
977 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
979 uint32_t cache_type_reg;
981 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
982 retval = armv7a->armv4_5_common.mrc(target, 15,
983 0, 1, /* op1, op2 */
984 0, 0, /* CRn, CRm */
985 &cache_type_reg);
986 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
988 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
989 armv4_5_identify_cache(cache_type_reg,
990 &armv7a->armv4_5_mmu.armv4_5_cache);
993 armv7a->armv4_5_mmu.mmu_enabled =
994 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
995 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
996 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
997 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
998 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1003 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1004 int handle_breakpoints)
1006 struct armv7a_common *armv7a = target_to_armv7a(target);
1007 struct arm *armv4_5 = &armv7a->armv4_5_common;
1008 struct breakpoint *breakpoint = NULL;
1009 struct breakpoint stepbreakpoint;
1010 struct reg *r;
1011 int retval;
1013 int timeout = 100;
1015 if (target->state != TARGET_HALTED)
1017 LOG_WARNING("target not halted");
1018 return ERROR_TARGET_NOT_HALTED;
1021 /* current = 1: continue on current pc, otherwise continue at <address> */
1022 r = armv4_5->pc;
1023 if (!current)
1025 buf_set_u32(r->value, 0, 32, address);
1027 else
1029 address = buf_get_u32(r->value, 0, 32);
1032 /* The front-end may request us not to handle breakpoints.
1033 * But since the Cortex-A8 uses a breakpoint for single stepping,
1034 * we MUST handle breakpoints.
1036 handle_breakpoints = 1;
1037 if (handle_breakpoints) {
1038 breakpoint = breakpoint_find(target, address);
1039 if (breakpoint)
1040 cortex_a8_unset_breakpoint(target, breakpoint);
1043 /* Setup single step breakpoint */
1044 stepbreakpoint.address = address;
1045 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1046 ? 2 : 4;
1047 stepbreakpoint.type = BKPT_HARD;
1048 stepbreakpoint.set = 0;
1050 /* Break on IVA mismatch */
1051 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1053 target->debug_reason = DBG_REASON_SINGLESTEP;
1055 retval = cortex_a8_resume(target, 1, address, 0, 0);
1056 if (retval != ERROR_OK)
1057 return retval;
1059 while (target->state != TARGET_HALTED)
1061 retval = cortex_a8_poll(target);
1062 if (retval != ERROR_OK)
1063 return retval;
1064 if (--timeout == 0)
1066 LOG_ERROR("timeout waiting for target halt");
1067 return ERROR_FAIL;
1071 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1072 if (timeout > 0)
1073 target->debug_reason = DBG_REASON_BREAKPOINT;
1075 if (breakpoint)
1076 cortex_a8_set_breakpoint(target, breakpoint, 0);
1078 if (target->state != TARGET_HALTED)
1079 LOG_DEBUG("target stepped");
1081 return ERROR_OK;
1084 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1086 struct armv7a_common *armv7a = target_to_armv7a(target);
1088 LOG_DEBUG(" ");
1090 if (armv7a->pre_restore_context)
1091 armv7a->pre_restore_context(target);
1093 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1095 return ERROR_OK;
1100 * Cortex-A8 Breakpoint and watchpoint functions
1103 /* Setup hardware Breakpoint Register Pair */
1104 static int cortex_a8_set_breakpoint(struct target *target,
1105 struct breakpoint *breakpoint, uint8_t matchmode)
1107 int retval;
1108 int brp_i=0;
1109 uint32_t control;
1110 uint8_t byte_addr_select = 0x0F;
1111 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1112 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1113 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1115 if (breakpoint->set)
1117 LOG_WARNING("breakpoint already set");
1118 return ERROR_OK;
1121 if (breakpoint->type == BKPT_HARD)
1123 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1124 brp_i++ ;
1125 if (brp_i >= cortex_a8->brp_num)
1127 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1128 return ERROR_FAIL;
1130 breakpoint->set = brp_i + 1;
1131 if (breakpoint->length == 2)
1133 byte_addr_select = (3 << (breakpoint->address & 0x02));
1135 control = ((matchmode & 0x7) << 20)
1136 | (byte_addr_select << 5)
1137 | (3 << 1) | 1;
1138 brp_list[brp_i].used = 1;
1139 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1140 brp_list[brp_i].control = control;
1141 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1142 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1143 brp_list[brp_i].value);
1144 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1145 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1146 brp_list[brp_i].control);
1147 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1148 brp_list[brp_i].control,
1149 brp_list[brp_i].value);
1151 else if (breakpoint->type == BKPT_SOFT)
1153 uint8_t code[4];
1154 if (breakpoint->length == 2)
1156 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1158 else
1160 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1162 retval = target->type->read_memory(target,
1163 breakpoint->address & 0xFFFFFFFE,
1164 breakpoint->length, 1,
1165 breakpoint->orig_instr);
1166 if (retval != ERROR_OK)
1167 return retval;
1168 retval = target->type->write_memory(target,
1169 breakpoint->address & 0xFFFFFFFE,
1170 breakpoint->length, 1, code);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 breakpoint->set = 0x11; /* Any nice value but 0 */
1176 return ERROR_OK;
1179 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1181 int retval;
1182 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1183 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1184 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1186 if (!breakpoint->set)
1188 LOG_WARNING("breakpoint not set");
1189 return ERROR_OK;
1192 if (breakpoint->type == BKPT_HARD)
1194 int brp_i = breakpoint->set - 1;
1195 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1197 LOG_DEBUG("Invalid BRP number in breakpoint");
1198 return ERROR_OK;
1200 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1201 brp_list[brp_i].control, brp_list[brp_i].value);
1202 brp_list[brp_i].used = 0;
1203 brp_list[brp_i].value = 0;
1204 brp_list[brp_i].control = 0;
1205 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1206 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1207 brp_list[brp_i].control);
1208 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1209 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1210 brp_list[brp_i].value);
1212 else
1214 /* restore original instruction (kept in target endianness) */
1215 if (breakpoint->length == 4)
1217 retval = target->type->write_memory(target,
1218 breakpoint->address & 0xFFFFFFFE,
1219 4, 1, breakpoint->orig_instr);
1220 if (retval != ERROR_OK)
1221 return retval;
1223 else
1225 retval = target->type->write_memory(target,
1226 breakpoint->address & 0xFFFFFFFE,
1227 2, 1, breakpoint->orig_instr);
1228 if (retval != ERROR_OK)
1229 return retval;
1232 breakpoint->set = 0;
1234 return ERROR_OK;
1237 static int cortex_a8_add_breakpoint(struct target *target,
1238 struct breakpoint *breakpoint)
1240 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1242 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1244 LOG_INFO("no hardware breakpoint available");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1248 if (breakpoint->type == BKPT_HARD)
1249 cortex_a8->brp_num_available--;
1250 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1252 return ERROR_OK;
1255 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1257 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1259 #if 0
1260 /* It is perfectly possible to remove breakpoints while the target is running */
1261 if (target->state != TARGET_HALTED)
1263 LOG_WARNING("target not halted");
1264 return ERROR_TARGET_NOT_HALTED;
1266 #endif
1268 if (breakpoint->set)
1270 cortex_a8_unset_breakpoint(target, breakpoint);
1271 if (breakpoint->type == BKPT_HARD)
1272 cortex_a8->brp_num_available++ ;
1276 return ERROR_OK;
1282 * Cortex-A8 Reset functions
1285 static int cortex_a8_assert_reset(struct target *target)
1287 struct armv7a_common *armv7a = target_to_armv7a(target);
1289 LOG_DEBUG(" ");
1291 /* FIXME when halt is requested, make it work somehow... */
1293 /* Issue some kind of warm reset. */
1294 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1295 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1296 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1297 /* REVISIT handle "pulls" cases, if there's
1298 * hardware that needs them to work.
1300 jtag_add_reset(0, 1);
1301 } else {
1302 LOG_ERROR("%s: how to reset?", target_name(target));
1303 return ERROR_FAIL;
1306 /* registers are now invalid */
1307 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1309 target->state = TARGET_RESET;
1311 return ERROR_OK;
1314 static int cortex_a8_deassert_reset(struct target *target)
1316 int retval;
1318 LOG_DEBUG(" ");
1320 /* be certain SRST is off */
1321 jtag_add_reset(0, 0);
1323 retval = cortex_a8_poll(target);
1324 if (retval != ERROR_OK)
1325 return retval;
1327 if (target->reset_halt) {
1328 if (target->state != TARGET_HALTED) {
1329 LOG_WARNING("%s: ran after reset and before halt ...",
1330 target_name(target));
1331 if ((retval = target_halt(target)) != ERROR_OK)
1332 return retval;
1336 return ERROR_OK;
1340 * Cortex-A8 Memory access
1342 * This is the same as for the Cortex-M3, but we must also use the
1343 * correct AP number for every access.
1346 static int cortex_a8_read_phys_memory(struct target *target,
1347 uint32_t address, uint32_t size,
1348 uint32_t count, uint8_t *buffer)
1350 struct armv7a_common *armv7a = target_to_armv7a(target);
1351 struct adiv5_dap *swjdp = &armv7a->dap;
1352 int retval = ERROR_INVALID_ARGUMENTS;
1354 /* cortex_a8 handles unaligned memory access */
1356 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1357 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1358 if (count && buffer) {
1359 switch (size) {
1360 case 4:
1361 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1362 break;
1363 case 2:
1364 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1365 break;
1366 case 1:
1367 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1368 break;
1372 return retval;
1375 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1376 uint32_t size, uint32_t count, uint8_t *buffer)
1378 int enabled = 0;
1379 uint32_t virt, phys;
1381 /* cortex_a8 handles unaligned memory access */
1383 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1384 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1385 cortex_a8_mmu(target, &enabled);
1386 if(enabled)
1388 virt = address;
1389 cortex_a8_virt2phys(target, virt, &phys);
1390 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1391 address = phys;
1394 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1397 static int cortex_a8_write_phys_memory(struct target *target,
1398 uint32_t address, uint32_t size,
1399 uint32_t count, uint8_t *buffer)
1401 struct armv7a_common *armv7a = target_to_armv7a(target);
1402 struct adiv5_dap *swjdp = &armv7a->dap;
1403 int retval = ERROR_INVALID_ARGUMENTS;
1405 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1407 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1408 if (count && buffer) {
1409 switch (size) {
1410 case 4:
1411 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1412 break;
1413 case 2:
1414 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1415 break;
1416 case 1:
1417 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1418 break;
1422 /* REVISIT this op is generic ARMv7-A/R stuff */
1423 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1425 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1427 retval = dpm->prepare(dpm);
1428 if (retval != ERROR_OK)
1429 return retval;
1431 /* The Cache handling will NOT work with MMU active, the
1432 * wrong addresses will be invalidated!
1434 * For both ICache and DCache, walk all cache lines in the
1435 * address range. Cortex-A8 has fixed 64 byte line length.
1437 * REVISIT per ARMv7, these may trigger watchpoints ...
1440 /* invalidate I-Cache */
1441 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1443 /* ICIMVAU - Invalidate Cache single entry
1444 * with MVA to PoU
1445 * MCR p15, 0, r0, c7, c5, 1
1447 for (uint32_t cacheline = address;
1448 cacheline < address + size * count;
1449 cacheline += 64) {
1450 retval = dpm->instr_write_data_r0(dpm,
1451 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1452 cacheline);
1456 /* invalidate D-Cache */
1457 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1459 /* DCIMVAC - Invalidate data Cache line
1460 * with MVA to PoC
1461 * MCR p15, 0, r0, c7, c6, 1
1463 for (uint32_t cacheline = address;
1464 cacheline < address + size * count;
1465 cacheline += 64) {
1466 retval = dpm->instr_write_data_r0(dpm,
1467 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1468 cacheline);
1472 /* (void) */ dpm->finish(dpm);
1475 return retval;
1478 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1479 uint32_t size, uint32_t count, uint8_t *buffer)
1481 int enabled = 0;
1482 uint32_t virt, phys;
1484 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1486 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1487 cortex_a8_mmu(target, &enabled);
1488 if(enabled)
1490 virt = address;
1491 cortex_a8_virt2phys(target, virt, &phys);
1492 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1493 address = phys;
1496 return cortex_a8_write_phys_memory(target, address, size,
1497 count, buffer);
1500 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1501 uint32_t count, uint8_t *buffer)
1503 return cortex_a8_write_memory(target, address, 4, count, buffer);
1507 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1509 #if 0
1510 u16 dcrdr;
1512 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1513 *ctrl = (uint8_t)dcrdr;
1514 *value = (uint8_t)(dcrdr >> 8);
1516 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1518 /* write ack back to software dcc register
1519 * signify we have read data */
1520 if (dcrdr & (1 << 0))
1522 dcrdr = 0;
1523 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1525 #endif
1526 return ERROR_OK;
1530 static int cortex_a8_handle_target_request(void *priv)
1532 struct target *target = priv;
1533 struct armv7a_common *armv7a = target_to_armv7a(target);
1534 struct adiv5_dap *swjdp = &armv7a->dap;
1536 if (!target_was_examined(target))
1537 return ERROR_OK;
1538 if (!target->dbg_msg_enabled)
1539 return ERROR_OK;
1541 if (target->state == TARGET_RUNNING)
1543 uint8_t data = 0;
1544 uint8_t ctrl = 0;
1546 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1548 /* check if we have data */
1549 if (ctrl & (1 << 0))
1551 uint32_t request;
1553 /* we assume target is quick enough */
1554 request = data;
1555 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1556 request |= (data << 8);
1557 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1558 request |= (data << 16);
1559 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1560 request |= (data << 24);
1561 target_request(target, request);
1565 return ERROR_OK;
1569 * Cortex-A8 target information and configuration
1572 static int cortex_a8_examine_first(struct target *target)
1574 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1575 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1576 struct adiv5_dap *swjdp = &armv7a->dap;
1577 int i;
1578 int retval = ERROR_OK;
1579 uint32_t didr, ctypr, ttypr, cpuid;
1581 /* stop assuming this is an OMAP! */
1582 LOG_DEBUG("TODO - autoconfigure");
1584 /* Here we shall insert a proper ROM Table scan */
1585 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1587 /* Rather than doing one extra read to ensure the DAP is configured,
1588 * we call ahbap_debugport_init(swjdp) instead
1590 retval = ahbap_debugport_init(swjdp);
1591 if (retval != ERROR_OK)
1592 return retval;
1594 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1595 if (retval != ERROR_OK)
1596 return retval;
1598 if ((retval = mem_ap_read_atomic_u32(swjdp,
1599 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1601 LOG_DEBUG("Examine %s failed", "CPUID");
1602 return retval;
1605 if ((retval = mem_ap_read_atomic_u32(swjdp,
1606 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1608 LOG_DEBUG("Examine %s failed", "CTYPR");
1609 return retval;
1612 if ((retval = mem_ap_read_atomic_u32(swjdp,
1613 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1615 LOG_DEBUG("Examine %s failed", "TTYPR");
1616 return retval;
1619 if ((retval = mem_ap_read_atomic_u32(swjdp,
1620 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1622 LOG_DEBUG("Examine %s failed", "DIDR");
1623 return retval;
1626 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1627 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1628 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1629 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1631 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1632 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1633 if (retval != ERROR_OK)
1634 return retval;
1636 /* Setup Breakpoint Register Pairs */
1637 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1638 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1639 cortex_a8->brp_num_available = cortex_a8->brp_num;
1640 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1641 // cortex_a8->brb_enabled = ????;
1642 for (i = 0; i < cortex_a8->brp_num; i++)
1644 cortex_a8->brp_list[i].used = 0;
1645 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1646 cortex_a8->brp_list[i].type = BRP_NORMAL;
1647 else
1648 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1649 cortex_a8->brp_list[i].value = 0;
1650 cortex_a8->brp_list[i].control = 0;
1651 cortex_a8->brp_list[i].BRPn = i;
1654 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1656 target_set_examined(target);
1657 return ERROR_OK;
1660 static int cortex_a8_examine(struct target *target)
1662 int retval = ERROR_OK;
1664 /* don't re-probe hardware after each reset */
1665 if (!target_was_examined(target))
1666 retval = cortex_a8_examine_first(target);
1668 /* Configure core debug access */
1669 if (retval == ERROR_OK)
1670 retval = cortex_a8_init_debug_access(target);
1672 return retval;
1676 * Cortex-A8 target creation and initialization
1679 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1680 struct target *target)
1682 /* examine_first() does a bunch of this */
1683 return ERROR_OK;
1686 static int cortex_a8_init_arch_info(struct target *target,
1687 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1689 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1690 struct arm *armv4_5 = &armv7a->armv4_5_common;
1691 struct adiv5_dap *dap = &armv7a->dap;
1693 armv7a->armv4_5_common.dap = dap;
1695 /* Setup struct cortex_a8_common */
1696 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1697 armv4_5->arch_info = armv7a;
1699 /* prepare JTAG information for the new target */
1700 cortex_a8->jtag_info.tap = tap;
1701 cortex_a8->jtag_info.scann_size = 4;
1703 /* Leave (only) generic DAP stuff for debugport_init() */
1704 dap->jtag_info = &cortex_a8->jtag_info;
1705 dap->memaccess_tck = 80;
1707 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1708 dap->tar_autoincr_block = (1 << 10);
1710 cortex_a8->fast_reg_read = 0;
1712 /* Set default value */
1713 cortex_a8->current_address_mode = ARM_MODE_ANY;
1715 /* register arch-specific functions */
1716 armv7a->examine_debug_reason = NULL;
1718 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1720 armv7a->pre_restore_context = NULL;
1721 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1722 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1723 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1724 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1725 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1726 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1727 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1728 armv7a->armv4_5_mmu.mmu_enabled = 0;
1731 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1733 /* REVISIT v7a setup should be in a v7a-specific routine */
1734 arm_init_arch_info(target, armv4_5);
1735 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1737 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1739 return ERROR_OK;
1742 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1744 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1746 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1748 return ERROR_OK;
1751 static uint32_t cortex_a8_get_ttb(struct target *target)
1753 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1754 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1755 uint32_t ttb = 0, retval = ERROR_OK;
1757 /* current_address_mode is set inside cortex_a8_virt2phys()
1758 where we can determine whether the address belongs to user or kernel space */
1759 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1761 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1762 retval = armv7a->armv4_5_common.mrc(target, 15,
1763 0, 1, /* op1, op2 */
1764 2, 0, /* CRn, CRm */
1765 &ttb);
1767 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1769 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1770 retval = armv7a->armv4_5_common.mrc(target, 15,
1771 0, 0, /* op1, op2 */
1772 2, 0, /* CRn, CRm */
1773 &ttb);
1775 /* we don't know whether the address belongs to user or kernel space;
1776 assume that if we are in kernel (SVC) mode the address
1777 belongs to the kernel, and if in user mode
1778 it belongs to user space */
1779 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1781 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1782 retval = armv7a->armv4_5_common.mrc(target, 15,
1783 0, 1, /* op1, op2 */
1784 2, 0, /* CRn, CRm */
1785 &ttb);
1787 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1789 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1790 retval = armv7a->armv4_5_common.mrc(target, 15,
1791 0, 0, /* op1, op2 */
1792 2, 0, /* CRn, CRm */
1793 &ttb);
1795 /* finally we don't know which TTB to use: user or kernel */
1796 else
1797 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1799 ttb &= 0xffffc000;
1801 return ttb;
1804 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1805 int d_u_cache, int i_cache)
1807 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1808 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1809 uint32_t cp15_control;
1811 /* read cp15 control register */
1812 armv7a->armv4_5_common.mrc(target, 15,
1813 0, 0, /* op1, op2 */
1814 1, 0, /* CRn, CRm */
1815 &cp15_control);
1818 if (mmu)
1819 cp15_control &= ~0x1U;
1821 if (d_u_cache)
1822 cp15_control &= ~0x4U;
1824 if (i_cache)
1825 cp15_control &= ~0x1000U;
1827 armv7a->armv4_5_common.mcr(target, 15,
1828 0, 0, /* op1, op2 */
1829 1, 0, /* CRn, CRm */
1830 cp15_control);
1833 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1834 int d_u_cache, int i_cache)
1836 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1837 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1838 uint32_t cp15_control;
1840 /* read cp15 control register */
1841 armv7a->armv4_5_common.mrc(target, 15,
1842 0, 0, /* op1, op2 */
1843 1, 0, /* CRn, CRm */
1844 &cp15_control);
1846 if (mmu)
1847 cp15_control |= 0x1U;
1849 if (d_u_cache)
1850 cp15_control |= 0x4U;
1852 if (i_cache)
1853 cp15_control |= 0x1000U;
1855 armv7a->armv4_5_common.mcr(target, 15,
1856 0, 0, /* op1, op2 */
1857 1, 0, /* CRn, CRm */
1858 cp15_control);
1862 static int cortex_a8_mmu(struct target *target, int *enabled)
1864 if (target->state != TARGET_HALTED) {
1865 LOG_ERROR("%s: target not halted", __func__);
1866 return ERROR_TARGET_INVALID;
1869 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1870 return ERROR_OK;
1873 static int cortex_a8_virt2phys(struct target *target,
1874 uint32_t virt, uint32_t *phys)
1876 uint32_t cb;
1877 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1878 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1879 struct armv7a_common *armv7a = target_to_armv7a(target);
1881 /* We assume that the virtual address space is split
1882 between user and kernel in the Linux style:
1883 0x00000000-0xbfffffff - User space
1884 0xc0000000-0xffffffff - Kernel space */
1885 if( virt < 0xc0000000 ) /* Linux user space */
1886 cortex_a8->current_address_mode = ARM_MODE_USR;
1887 else /* Linux kernel */
1888 cortex_a8->current_address_mode = ARM_MODE_SVC;
1889 uint32_t ret;
1890 int retval = armv4_5_mmu_translate_va(target,
1891 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1892 if (retval != ERROR_OK)
1893 return retval;
1894 /* Reset the flag. We don't want someone else to use it by mistake */
1895 cortex_a8->current_address_mode = ARM_MODE_ANY;
1897 *phys = ret;
1898 return ERROR_OK;
1901 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1903 struct target *target = get_current_target(CMD_CTX);
1904 struct armv7a_common *armv7a = target_to_armv7a(target);
1906 return armv4_5_handle_cache_info_command(CMD_CTX,
1907 &armv7a->armv4_5_mmu.armv4_5_cache);
1911 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1913 struct target *target = get_current_target(CMD_CTX);
1914 if (!target_was_examined(target))
1916 LOG_ERROR("target not examined yet");
1917 return ERROR_FAIL;
1920 return cortex_a8_init_debug_access(target);
1923 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1925 .name = "cache_info",
1926 .handler = cortex_a8_handle_cache_info_command,
1927 .mode = COMMAND_EXEC,
1928 .help = "display information about target caches",
1931 .name = "dbginit",
1932 .handler = cortex_a8_handle_dbginit_command,
1933 .mode = COMMAND_EXEC,
1934 .help = "Initialize core debug",
1936 COMMAND_REGISTRATION_DONE
1938 static const struct command_registration cortex_a8_command_handlers[] = {
1940 .chain = arm_command_handlers,
1943 .chain = armv7a_command_handlers,
1946 .name = "cortex_a8",
1947 .mode = COMMAND_ANY,
1948 .help = "Cortex-A8 command group",
1949 .chain = cortex_a8_exec_command_handlers,
1951 COMMAND_REGISTRATION_DONE
1954 struct target_type cortexa8_target = {
1955 .name = "cortex_a8",
1957 .poll = cortex_a8_poll,
1958 .arch_state = armv7a_arch_state,
1960 .target_request_data = NULL,
1962 .halt = cortex_a8_halt,
1963 .resume = cortex_a8_resume,
1964 .step = cortex_a8_step,
1966 .assert_reset = cortex_a8_assert_reset,
1967 .deassert_reset = cortex_a8_deassert_reset,
1968 .soft_reset_halt = NULL,
1970 /* REVISIT allow exporting VFP3 registers ... */
1971 .get_gdb_reg_list = arm_get_gdb_reg_list,
1973 .read_memory = cortex_a8_read_memory,
1974 .write_memory = cortex_a8_write_memory,
1975 .bulk_write_memory = cortex_a8_bulk_write_memory,
1977 .checksum_memory = arm_checksum_memory,
1978 .blank_check_memory = arm_blank_check_memory,
1980 .run_algorithm = armv4_5_run_algorithm,
1982 .add_breakpoint = cortex_a8_add_breakpoint,
1983 .remove_breakpoint = cortex_a8_remove_breakpoint,
1984 .add_watchpoint = NULL,
1985 .remove_watchpoint = NULL,
1987 .commands = cortex_a8_command_handlers,
1988 .target_create = cortex_a8_target_create,
1989 .init_target = cortex_a8_init_target,
1990 .examine = cortex_a8_examine,
1992 .read_phys_memory = cortex_a8_read_phys_memory,
1993 .write_phys_memory = cortex_a8_write_phys_memory,
1994 .mmu = cortex_a8_mmu,
1995 .virt2phys = cortex_a8_virt2phys,