cortex a8: add timeouts waiting for restart, prepare and halt
[openocd/dnglaze.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
76  * Cortex-A8 basic debug access; very low level, assumes state is saved
78 static int cortex_a8_init_debug_access(struct target *target)
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
83 int retval;
84 uint32_t dummy;
86 LOG_DEBUG(" ");
88 /* Unlocking the debug registers for modification */
89 /* The debugport might be uninitialised so try twice */
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
96 if (retval != ERROR_OK)
97 return retval;
98 /* Clear Sticky Power Down status Bit in PRSR to enable access to
99 the registers in the Core Power Domain */
100 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
101 /* Enabling of instruction execution in debug mode is done in debug_entry code */
103 /* Resync breakpoint registers */
105 /* Since this is likely called from init or reset, update target state information */
106 cortex_a8_poll(target);
108 return retval;
111 /* To reduce needless round-trips, pass in a pointer to the current
112 * DSCR value. Initialize it to zero if you just need to know the
113 * value on return from this function; or DSCR_INSTR_COMP if you
114 * happen to know that no instruction is pending.
116 static int cortex_a8_exec_opcode(struct target *target,
117 uint32_t opcode, uint32_t *dscr_p)
119 uint32_t dscr;
120 int retval;
121 struct armv7a_common *armv7a = target_to_armv7a(target);
122 struct adiv5_dap *swjdp = &armv7a->dap;
124 dscr = dscr_p ? *dscr_p : 0;
126 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
128 /* Wait for InstrCompl bit to be set */
129 while ((dscr & DSCR_INSTR_COMP) == 0)
131 retval = mem_ap_read_atomic_u32(swjdp,
132 armv7a->debug_base + CPUDBG_DSCR, &dscr);
133 if (retval != ERROR_OK)
135 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
136 return retval;
140 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
144 retval = mem_ap_read_atomic_u32(swjdp,
145 armv7a->debug_base + CPUDBG_DSCR, &dscr);
146 if (retval != ERROR_OK)
148 LOG_ERROR("Could not read DSCR register");
149 return retval;
152 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
154 if (dscr_p)
155 *dscr_p = dscr;
157 return retval;
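/* Editor's note: illustrative usage sketch, not part of the original file.
 * It shows the two dscr_p call patterns described above: pass a pointer
 * initialized to 0 when the current DSCR state is unknown (forces a fresh
 * DSCR read), or keep reusing the value the previous call stored so
 * back-to-back opcodes skip the redundant "wait for InstrCompl" poll.
 * The opcode 0xE1A0000F ("MOV r0, r15") is just an example taken from this
 * file; it clobbers r0, which is fine for illustration. The function name is
 * this editor's invention.
 */
static int cortex_a8_exec_opcode_usage_sketch(struct target *target)
{
	uint32_t dscr = 0;	/* unknown DSCR state: first call re-reads it */
	int retval;

	retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* dscr now has DSCR_INSTR_COMP set, so this call can issue the next
	 * opcode without an extra DSCR round-trip first */
	return cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
}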
160 /**************************************************************************
161 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
162 This can cause problems when the MMU is active.
163 **************************************************************************/
164 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
165 uint32_t * regfile)
167 int retval = ERROR_OK;
168 struct armv7a_common *armv7a = target_to_armv7a(target);
169 struct adiv5_dap *swjdp = &armv7a->dap;
171 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
172 cortex_a8_dap_write_coreregister_u32(target, address, 0);
173 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
174 dap_ap_select(swjdp, swjdp_memoryap);
175 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
176 dap_ap_select(swjdp, swjdp_debugap);
178 return retval;
181 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
182 uint32_t *value, int regnum)
184 int retval = ERROR_OK;
185 uint8_t reg = regnum&0xFF;
186 uint32_t dscr = 0;
187 struct armv7a_common *armv7a = target_to_armv7a(target);
188 struct adiv5_dap *swjdp = &armv7a->dap;
190 if (reg > 17)
191 return retval;
193 if (reg < 15)
195 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
196 cortex_a8_exec_opcode(target,
197 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
198 &dscr);
200 else if (reg == 15)
202 /* "MOV r0, r15"; then move r0 to DCCTX */
203 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
204 cortex_a8_exec_opcode(target,
205 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
206 &dscr);
208 else
210 /* "MRS r0, CPSR" or "MRS r0, SPSR"
211 * then move r0 to DCCTX
213 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
214 cortex_a8_exec_opcode(target,
215 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
216 &dscr);
219 /* Wait for DTRTXfull, then read DTRTX */
220 while ((dscr & DSCR_DTR_TX_FULL) == 0)
222 retval = mem_ap_read_atomic_u32(swjdp,
223 armv7a->debug_base + CPUDBG_DSCR, &dscr);
226 retval = mem_ap_read_atomic_u32(swjdp,
227 armv7a->debug_base + CPUDBG_DTRTX, value);
228 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
230 return retval;
233 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
234 uint32_t value, int regnum)
236 int retval = ERROR_OK;
237 uint8_t Rd = regnum&0xFF;
238 uint32_t dscr;
239 struct armv7a_common *armv7a = target_to_armv7a(target);
240 struct adiv5_dap *swjdp = &armv7a->dap;
242 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
244 /* Check that DCCRX is not full */
245 retval = mem_ap_read_atomic_u32(swjdp,
246 armv7a->debug_base + CPUDBG_DSCR, &dscr);
247 if (dscr & DSCR_DTR_RX_FULL)
249 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
250 /* Clear DCCRX by reading it: MRC p14, 0, R0, c0, c5, 0, opcode 0xEE100E15 */
251 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
252 &dscr);
255 if (Rd > 17)
256 return retval;
258 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
259 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
260 retval = mem_ap_write_u32(swjdp,
261 armv7a->debug_base + CPUDBG_DTRRX, value);
263 if (Rd < 15)
265 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
266 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
267 &dscr);
269 else if (Rd == 15)
271 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
272 * then "mov r15, r0"
274 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
275 &dscr);
276 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
278 else
280 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
281 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
283 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
284 &dscr);
285 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
286 &dscr);
288 /* "Prefetch flush" after modifying execution status in CPSR */
289 if (Rd == 16)
290 cortex_a8_exec_opcode(target,
291 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
292 &dscr);
295 return retval;
298 /* Write to memory mapped registers directly with no cache or mmu handling */
299 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
301 int retval;
302 struct armv7a_common *armv7a = target_to_armv7a(target);
303 struct adiv5_dap *swjdp = &armv7a->dap;
305 retval = mem_ap_write_atomic_u32(swjdp, address, value);
307 return retval;
311 * Cortex-A8 implementation of Debug Programmer's Model
313 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
314 * so there's no need to poll for it before executing an instruction.
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
321 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
323 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
326 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(&a8->armv7a_common.dap,
330 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
333 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
334 uint32_t *dscr_p)
336 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
337 uint32_t dscr = DSCR_INSTR_COMP;
338 int retval;
340 if (dscr_p)
341 dscr = *dscr_p;
343 /* Wait for DTRRXfull */
344 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
345 retval = mem_ap_read_atomic_u32(swjdp,
346 a8->armv7a_common.debug_base + CPUDBG_DSCR,
347 &dscr);
350 retval = mem_ap_read_atomic_u32(swjdp,
351 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
352 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
354 if (dscr_p)
355 *dscr_p = dscr;
357 return retval;
360 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
362 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
363 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
364 uint32_t dscr;
365 int retval;
367 /* set up invariant: INSTR_COMP is set after every DPM operation */
368 long long then = timeval_ms();
369 for (;;)
371 retval = mem_ap_read_atomic_u32(swjdp,
372 a8->armv7a_common.debug_base + CPUDBG_DSCR,
373 &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 if ((dscr & DSCR_INSTR_COMP) != 0)
377 break;
378 if (timeval_ms() > then + 1000)
380 LOG_ERROR("Timeout waiting for dpm prepare");
381 return ERROR_FAIL;
385 /* this "should never happen" ... */
386 if (dscr & DSCR_DTR_RX_FULL) {
387 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
388 /* Clear DCCRX */
389 retval = cortex_a8_exec_opcode(
390 a8->armv7a_common.armv4_5_common.target,
391 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
392 &dscr);
395 return retval;
398 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
400 /* REVISIT what could be done here? */
401 return ERROR_OK;
404 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
405 uint32_t opcode, uint32_t data)
407 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
408 int retval;
409 uint32_t dscr = DSCR_INSTR_COMP;
411 retval = cortex_a8_write_dcc(a8, data);
413 return cortex_a8_exec_opcode(
414 a8->armv7a_common.armv4_5_common.target,
415 opcode,
416 &dscr);
419 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
420 uint32_t opcode, uint32_t data)
422 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
423 uint32_t dscr = DSCR_INSTR_COMP;
424 int retval;
426 retval = cortex_a8_write_dcc(a8, data);
428 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
429 retval = cortex_a8_exec_opcode(
430 a8->armv7a_common.armv4_5_common.target,
431 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
432 &dscr);
434 /* then the opcode, taking data from R0 */
435 retval = cortex_a8_exec_opcode(
436 a8->armv7a_common.armv4_5_common.target,
437 opcode,
438 &dscr);
440 return retval;
443 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
445 struct target *target = dpm->arm->target;
446 uint32_t dscr = DSCR_INSTR_COMP;
448 /* "Prefetch flush" after modifying execution status in CPSR */
449 return cortex_a8_exec_opcode(target,
450 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
451 &dscr);
454 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
455 uint32_t opcode, uint32_t *data)
457 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
458 int retval;
459 uint32_t dscr = DSCR_INSTR_COMP;
461 /* the opcode, writing data to DCC */
462 retval = cortex_a8_exec_opcode(
463 a8->armv7a_common.armv4_5_common.target,
464 opcode,
465 &dscr);
467 return cortex_a8_read_dcc(a8, data, &dscr);
471 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
472 uint32_t opcode, uint32_t *data)
474 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
475 uint32_t dscr = DSCR_INSTR_COMP;
476 int retval;
478 /* the opcode, writing data to R0 */
479 retval = cortex_a8_exec_opcode(
480 a8->armv7a_common.armv4_5_common.target,
481 opcode,
482 &dscr);
484 /* write R0 to DCC */
485 retval = cortex_a8_exec_opcode(
486 a8->armv7a_common.armv4_5_common.target,
487 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
488 &dscr);
490 return cortex_a8_read_dcc(a8, data, &dscr);
493 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
494 uint32_t addr, uint32_t control)
496 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
497 uint32_t vr = a8->armv7a_common.debug_base;
498 uint32_t cr = a8->armv7a_common.debug_base;
499 int retval;
501 switch (index_t) {
502 case 0 ... 15: /* breakpoints */
503 vr += CPUDBG_BVR_BASE;
504 cr += CPUDBG_BCR_BASE;
505 break;
506 case 16 ... 31: /* watchpoints */
507 vr += CPUDBG_WVR_BASE;
508 cr += CPUDBG_WCR_BASE;
509 index_t -= 16;
510 break;
511 default:
512 return ERROR_FAIL;
514 vr += 4 * index_t;
515 cr += 4 * index_t;
517 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
518 (unsigned) vr, (unsigned) cr);
520 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
521 vr, addr);
522 if (retval != ERROR_OK)
523 return retval;
524 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
525 cr, control);
526 return retval;
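/* Editor's note: illustrative sketch, not part of the original file. It makes
 * the index mapping used above explicit: indices 0..15 select breakpoint
 * register pairs, 16..31 select watchpoint register pairs, and each pair's
 * value/control registers sit 4 bytes apart in their respective banks. The
 * helper name is this editor's invention.
 */
static int bpwp_regs_sketch(uint32_t debug_base, unsigned index_t,
		uint32_t *vr, uint32_t *cr)
{
	if (index_t < 16) {			/* breakpoint pair */
		*vr = debug_base + CPUDBG_BVR_BASE + 4 * index_t;
		*cr = debug_base + CPUDBG_BCR_BASE + 4 * index_t;
	} else if (index_t < 32) {		/* watchpoint pair */
		*vr = debug_base + CPUDBG_WVR_BASE + 4 * (index_t - 16);
		*cr = debug_base + CPUDBG_WCR_BASE + 4 * (index_t - 16);
	} else {
		return ERROR_FAIL;
	}
	return ERROR_OK;
}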
529 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
531 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
532 uint32_t cr;
534 switch (index_t) {
535 case 0 ... 15:
536 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
537 break;
538 case 16 ... 31:
539 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
540 index_t -= 16;
541 break;
542 default:
543 return ERROR_FAIL;
545 cr += 4 * index_t;
547 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
549 /* clear control register */
550 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
553 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
555 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
556 int retval;
558 dpm->arm = &a8->armv7a_common.armv4_5_common;
559 dpm->didr = didr;
561 dpm->prepare = cortex_a8_dpm_prepare;
562 dpm->finish = cortex_a8_dpm_finish;
564 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
565 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
566 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
568 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
569 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
571 dpm->bpwp_enable = cortex_a8_bpwp_enable;
572 dpm->bpwp_disable = cortex_a8_bpwp_disable;
574 retval = arm_dpm_setup(dpm);
575 if (retval == ERROR_OK)
576 retval = arm_dpm_initialize(dpm);
578 return retval;
583 * Cortex-A8 Run control
586 static int cortex_a8_poll(struct target *target)
588 int retval = ERROR_OK;
589 uint32_t dscr;
590 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
591 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
592 struct adiv5_dap *swjdp = &armv7a->dap;
593 enum target_state prev_target_state = target->state;
594 uint8_t saved_apsel = dap_ap_get_select(swjdp);
596 dap_ap_select(swjdp, swjdp_debugap);
597 retval = mem_ap_read_atomic_u32(swjdp,
598 armv7a->debug_base + CPUDBG_DSCR, &dscr);
599 if (retval != ERROR_OK)
601 dap_ap_select(swjdp, saved_apsel);
602 return retval;
604 cortex_a8->cpudbg_dscr = dscr;
606 if ((dscr & 0x3) == 0x3)
608 if (prev_target_state != TARGET_HALTED)
610 /* We have a halting debug event */
611 LOG_DEBUG("Target halted");
612 target->state = TARGET_HALTED;
613 if ((prev_target_state == TARGET_RUNNING)
614 || (prev_target_state == TARGET_RESET))
616 retval = cortex_a8_debug_entry(target);
617 if (retval != ERROR_OK)
618 return retval;
620 target_call_event_callbacks(target,
621 TARGET_EVENT_HALTED);
623 if (prev_target_state == TARGET_DEBUG_RUNNING)
625 LOG_DEBUG(" ");
627 retval = cortex_a8_debug_entry(target);
628 if (retval != ERROR_OK)
629 return retval;
631 target_call_event_callbacks(target,
632 TARGET_EVENT_DEBUG_HALTED);
636 else if ((dscr & 0x3) == 0x2)
638 target->state = TARGET_RUNNING;
640 else
642 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
643 target->state = TARGET_UNKNOWN;
646 dap_ap_select(swjdp, saved_apsel);
648 return retval;
651 static int cortex_a8_halt(struct target *target)
653 int retval = ERROR_OK;
654 uint32_t dscr;
655 struct armv7a_common *armv7a = target_to_armv7a(target);
656 struct adiv5_dap *swjdp = &armv7a->dap;
657 uint8_t saved_apsel = dap_ap_get_select(swjdp);
658 dap_ap_select(swjdp, swjdp_debugap);
661  * Tell the core to halt by writing DRCR with 0x1,
662  * then wait for it to enter the halted state.
664 retval = mem_ap_write_atomic_u32(swjdp,
665 armv7a->debug_base + CPUDBG_DRCR, 0x1);
666 if (retval != ERROR_OK)
667 goto out;
670 * enter halting debug mode
672 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
673 if (retval != ERROR_OK)
674 goto out;
676 retval = mem_ap_write_atomic_u32(swjdp,
677 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
678 if (retval != ERROR_OK)
679 goto out;
681 long long then = timeval_ms();
682 for (;;)
684 retval = mem_ap_read_atomic_u32(swjdp,
685 armv7a->debug_base + CPUDBG_DSCR, &dscr);
686 if (retval != ERROR_OK)
687 goto out;
688 if ((dscr & DSCR_CORE_HALTED) != 0)
690 break;
692 if (timeval_ms() > then + 1000)
694 LOG_ERROR("Timeout waiting for halt");
695 return ERROR_FAIL;
699 target->debug_reason = DBG_REASON_DBGRQ;
701 out:
702 dap_ap_select(swjdp, saved_apsel);
703 return retval;
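/* Editor's note: illustrative sketch, not part of the original patch. The
 * dpm_prepare and halt paths above, and the restart wait in
 * cortex_a8_resume() below, now share the same pattern: poll DSCR until a
 * status bit appears or roughly one second has elapsed. A possible common
 * helper could look like the following; its name and the fixed 1000 ms
 * budget are this editor's assumptions, not OpenOCD API.
 */
static int cortex_a8_wait_dscr_bits_sketch(struct adiv5_dap *swjdp,
		uint32_t dscr_addr, uint32_t mask, uint32_t *dscr_p)
{
	long long then = timeval_ms();
	uint32_t dscr;
	int retval;

	for (;;) {
		/* dscr_addr would be armv7a->debug_base + CPUDBG_DSCR */
		retval = mem_ap_read_atomic_u32(swjdp, dscr_addr, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & mask) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for DSCR bits 0x%08" PRIx32, mask);
			return ERROR_FAIL;
		}
	}
	if (dscr_p)
		*dscr_p = dscr;
	return ERROR_OK;
}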
706 static int cortex_a8_resume(struct target *target, int current,
707 uint32_t address, int handle_breakpoints, int debug_execution)
709 struct armv7a_common *armv7a = target_to_armv7a(target);
710 struct arm *armv4_5 = &armv7a->armv4_5_common;
711 struct adiv5_dap *swjdp = &armv7a->dap;
712 int retval;
714 // struct breakpoint *breakpoint = NULL;
715 uint32_t resume_pc, dscr;
717 uint8_t saved_apsel = dap_ap_get_select(swjdp);
718 dap_ap_select(swjdp, swjdp_debugap);
720 if (!debug_execution)
721 target_free_all_working_areas(target);
723 #if 0
724 if (debug_execution)
726 /* Disable interrupts */
727 /* We disable interrupts in the PRIMASK register instead of
728 * masking with C_MASKINTS,
729 * This is probably the same issue as Cortex-M3 Errata 377493:
730 * C_MASKINTS in parallel with disabled interrupts can cause
731 * local faults to not be taken. */
732 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
733 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
734 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
736 /* Make sure we are in Thumb mode */
737 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
738 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
739 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
740 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
742 #endif
744 /* current = 1: continue on current pc, otherwise continue at <address> */
745 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
746 if (!current)
747 resume_pc = address;
749 /* Make sure that the ARMv7 GDB Thumb fixups do not
750 * kill the return address
752 switch (armv4_5->core_state)
754 case ARM_STATE_ARM:
755 resume_pc &= 0xFFFFFFFC;
756 break;
757 case ARM_STATE_THUMB:
758 case ARM_STATE_THUMB_EE:
759 /* When the return address is loaded into PC
760 * bit 0 must be 1 to stay in Thumb state
762 resume_pc |= 0x1;
763 break;
764 case ARM_STATE_JAZELLE:
765 LOG_ERROR("How do I resume into Jazelle state??");
766 return ERROR_FAIL;
768 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
769 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
770 armv4_5->pc->dirty = 1;
771 armv4_5->pc->valid = 1;
773 cortex_a8_restore_context(target, handle_breakpoints);
775 #if 0
776 /* the front-end may request us not to handle breakpoints */
777 if (handle_breakpoints)
779 /* Single step past breakpoint at current address */
780 if ((breakpoint = breakpoint_find(target, resume_pc)))
782 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
783 cortex_m3_unset_breakpoint(target, breakpoint);
784 cortex_m3_single_step_core(target);
785 cortex_m3_set_breakpoint(target, breakpoint);
789 #endif
790 /* Restart core and wait for it to be started
791 * NOTE: this clears DSCR_ITR_EN and other bits.
793 * REVISIT: for single stepping, we probably want to
794 * disable IRQs by default, with optional override...
796 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
797 if (retval != ERROR_OK)
798 return retval;
800 long long then = timeval_ms();
801 for (;;)
803 retval = mem_ap_read_atomic_u32(swjdp,
804 armv7a->debug_base + CPUDBG_DSCR, &dscr);
805 if (retval != ERROR_OK)
806 return retval;
807 if ((dscr & DSCR_CORE_RESTARTED) != 0)
808 break;
809 if (timeval_ms() > then + 1000)
811 LOG_ERROR("Timeout waiting for resume");
812 return ERROR_FAIL;
816 target->debug_reason = DBG_REASON_NOTHALTED;
817 target->state = TARGET_RUNNING;
819 /* registers are now invalid */
820 register_cache_invalidate(armv4_5->core_cache);
822 if (!debug_execution)
824 target->state = TARGET_RUNNING;
825 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
826 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
828 else
830 target->state = TARGET_DEBUG_RUNNING;
831 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
832 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
835 dap_ap_select(swjdp, saved_apsel);
837 return ERROR_OK;
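/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the resume-address fixup performed above: ARM state
 * needs a word-aligned PC, while Thumb/ThumbEE state needs bit 0 set so the
 * core stays in Thumb when the value is loaded into the PC.
 */
static uint32_t resume_pc_fixup_sketch(uint32_t pc, int thumb)
{
	if (thumb)
		return pc | 0x1;	/* keep/force bit 0 for Thumb state */
	return pc & 0xFFFFFFFC;		/* ARM state: word align */
}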
840 static int cortex_a8_debug_entry(struct target *target)
842 int i;
843 uint32_t regfile[16], cpsr, dscr;
844 int retval = ERROR_OK;
845 struct working_area *regfile_working_area = NULL;
846 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
847 struct armv7a_common *armv7a = target_to_armv7a(target);
848 struct arm *armv4_5 = &armv7a->armv4_5_common;
849 struct adiv5_dap *swjdp = &armv7a->dap;
850 struct reg *reg;
852 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
854 /* REVISIT surely we should not re-read DSCR !! */
855 retval = mem_ap_read_atomic_u32(swjdp,
856 armv7a->debug_base + CPUDBG_DSCR, &dscr);
857 if (retval != ERROR_OK)
858 return retval;
860 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
861 * imprecise data aborts get discarded by issuing a Data
862 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
865 /* Enable the ITR execution once we are in debug mode */
866 dscr |= DSCR_ITR_EN;
867 retval = mem_ap_write_atomic_u32(swjdp,
868 armv7a->debug_base + CPUDBG_DSCR, dscr);
869 if (retval != ERROR_OK)
870 return retval;
872 /* Examine debug reason */
873 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
875 /* save address of instruction that triggered the watchpoint? */
876 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
877 uint32_t wfar;
879 retval = mem_ap_read_atomic_u32(swjdp,
880 armv7a->debug_base + CPUDBG_WFAR,
881 &wfar);
882 if (retval != ERROR_OK)
883 return retval;
884 arm_dpm_report_wfar(&armv7a->dpm, wfar);
887 /* REVISIT fast_reg_read is never set ... */
889 /* Examine target state and mode */
890 if (cortex_a8->fast_reg_read)
891 target_alloc_working_area(target, 64, &regfile_working_area);
893 /* First load registers accessible through the core debug port */
894 if (!regfile_working_area)
896 retval = arm_dpm_read_current_registers(&armv7a->dpm);
898 else
900 dap_ap_select(swjdp, swjdp_memoryap);
901 cortex_a8_read_regs_through_mem(target,
902 regfile_working_area->address, regfile);
903 dap_ap_select(swjdp, swjdp_memoryap);
904 target_free_working_area(target, regfile_working_area);
906 /* read Current PSR */
907 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
908 dap_ap_select(swjdp, swjdp_debugap);
909 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
911 arm_set_cpsr(armv4_5, cpsr);
913 /* update cache */
914 for (i = 0; i <= ARM_PC; i++)
916 reg = arm_reg_current(armv4_5, i);
918 buf_set_u32(reg->value, 0, 32, regfile[i]);
919 reg->valid = 1;
920 reg->dirty = 0;
923 /* Fixup PC Resume Address */
924 if (cpsr & (1 << 5))
926 // T bit set for Thumb or ThumbEE state
927 regfile[ARM_PC] -= 4;
929 else
931 // ARM state
932 regfile[ARM_PC] -= 8;
935 reg = armv4_5->pc;
936 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
937 reg->dirty = reg->valid;
940 #if 0
941 /* TODO, Move this */
942 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
943 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
944 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
946 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
947 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
949 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
950 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
951 #endif
953 /* Are we in an exception handler */
954 // armv4_5->exception_number = 0;
955 if (armv7a->post_debug_entry)
956 armv7a->post_debug_entry(target);
958 return retval;
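/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the PC fixup above: on debug entry the sampled PC is
 * ahead of the instruction that caused the halt, so it is wound back by 4
 * in Thumb/ThumbEE state (CPSR.T, bit 5, set) and by 8 in ARM state.
 */
static uint32_t debug_entry_pc_sketch(uint32_t sampled_pc, uint32_t cpsr)
{
	if (cpsr & (1 << 5))		/* T bit: Thumb or ThumbEE */
		return sampled_pc - 4;
	return sampled_pc - 8;		/* ARM state */
}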
961 static void cortex_a8_post_debug_entry(struct target *target)
963 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
964 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
965 int retval;
967 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
968 retval = armv7a->armv4_5_common.mrc(target, 15,
969 0, 0, /* op1, op2 */
970 1, 0, /* CRn, CRm */
971 &cortex_a8->cp15_control_reg);
972 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
974 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
976 uint32_t cache_type_reg;
978 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
979 retval = armv7a->armv4_5_common.mrc(target, 15,
980 0, 1, /* op1, op2 */
981 0, 0, /* CRn, CRm */
982 &cache_type_reg);
983 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
985 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
986 armv4_5_identify_cache(cache_type_reg,
987 &armv7a->armv4_5_mmu.armv4_5_cache);
990 armv7a->armv4_5_mmu.mmu_enabled =
991 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
992 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
993 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
994 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
995 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
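/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the CP15 control register decoding above: bit 0 enables
 * the MMU, bit 2 the data/unified cache and bit 12 the instruction cache.
 */
struct a8_ctrl_bits_sketch {
	int mmu_enabled;
	int d_u_cache_enabled;
	int i_cache_enabled;
};

static struct a8_ctrl_bits_sketch decode_cp15_control_sketch(uint32_t ctrl)
{
	struct a8_ctrl_bits_sketch bits;

	bits.mmu_enabled = (ctrl & 0x1U) ? 1 : 0;
	bits.d_u_cache_enabled = (ctrl & 0x4U) ? 1 : 0;
	bits.i_cache_enabled = (ctrl & 0x1000U) ? 1 : 0;
	return bits;
}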
1000 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1001 int handle_breakpoints)
1003 struct armv7a_common *armv7a = target_to_armv7a(target);
1004 struct arm *armv4_5 = &armv7a->armv4_5_common;
1005 struct breakpoint *breakpoint = NULL;
1006 struct breakpoint stepbreakpoint;
1007 struct reg *r;
1009 int timeout = 100;
1011 if (target->state != TARGET_HALTED)
1013 LOG_WARNING("target not halted");
1014 return ERROR_TARGET_NOT_HALTED;
1017 /* current = 1: continue on current pc, otherwise continue at <address> */
1018 r = armv4_5->pc;
1019 if (!current)
1021 buf_set_u32(r->value, 0, 32, address);
1023 else
1025 address = buf_get_u32(r->value, 0, 32);
1028 /* The front-end may request us not to handle breakpoints.
1029  * But since Cortex-A8 uses a breakpoint for single stepping,
1030 * we MUST handle breakpoints.
1032 handle_breakpoints = 1;
1033 if (handle_breakpoints) {
1034 breakpoint = breakpoint_find(target, address);
1035 if (breakpoint)
1036 cortex_a8_unset_breakpoint(target, breakpoint);
1039 /* Setup single step breakpoint */
1040 stepbreakpoint.address = address;
1041 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1042 ? 2 : 4;
1043 stepbreakpoint.type = BKPT_HARD;
1044 stepbreakpoint.set = 0;
1046 /* Break on IVA mismatch */
1047 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1049 target->debug_reason = DBG_REASON_SINGLESTEP;
1051 cortex_a8_resume(target, 1, address, 0, 0);
1053 while (target->state != TARGET_HALTED)
1055 cortex_a8_poll(target);
1056 if (--timeout == 0)
1058 LOG_WARNING("timeout waiting for target halt");
1059 break;
1063 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1064 if (timeout > 0)
1065 target->debug_reason = DBG_REASON_BREAKPOINT;
1067 if (breakpoint)
1068 cortex_a8_set_breakpoint(target, breakpoint, 0);
1070 if (target->state != TARGET_HALTED)
1071 LOG_DEBUG("target stepped");
1073 return ERROR_OK;
1076 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1078 struct armv7a_common *armv7a = target_to_armv7a(target);
1080 LOG_DEBUG(" ");
1082 if (armv7a->pre_restore_context)
1083 armv7a->pre_restore_context(target);
1085 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1087 return ERROR_OK;
1092  * Cortex-A8 Breakpoint and watchpoint functions
1095 /* Setup hardware Breakpoint Register Pair */
1096 static int cortex_a8_set_breakpoint(struct target *target,
1097 struct breakpoint *breakpoint, uint8_t matchmode)
1099 int retval;
1100 int brp_i=0;
1101 uint32_t control;
1102 uint8_t byte_addr_select = 0x0F;
1103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1104 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1105 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1107 if (breakpoint->set)
1109 LOG_WARNING("breakpoint already set");
1110 return ERROR_OK;
1113 if (breakpoint->type == BKPT_HARD)
1115 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1116 brp_i++;
1117 if (brp_i >= cortex_a8->brp_num)
1119 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1120 return ERROR_FAIL;
1122 breakpoint->set = brp_i + 1;
1123 if (breakpoint->length == 2)
1125 byte_addr_select = (3 << (breakpoint->address & 0x02));
1127 control = ((matchmode & 0x7) << 20)
1128 | (byte_addr_select << 5)
1129 | (3 << 1) | 1;
1130 brp_list[brp_i].used = 1;
1131 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1132 brp_list[brp_i].control = control;
1133 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1134 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1135 brp_list[brp_i].value);
1136 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1137 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1138 brp_list[brp_i].control);
1139 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1140 brp_list[brp_i].control,
1141 brp_list[brp_i].value);
1143 else if (breakpoint->type == BKPT_SOFT)
1145 uint8_t code[4];
1146 if (breakpoint->length == 2)
1148 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1150 else
1152 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1154 retval = target->type->read_memory(target,
1155 breakpoint->address & 0xFFFFFFFE,
1156 breakpoint->length, 1,
1157 breakpoint->orig_instr);
1158 if (retval != ERROR_OK)
1159 return retval;
1160 retval = target->type->write_memory(target,
1161 breakpoint->address & 0xFFFFFFFE,
1162 breakpoint->length, 1, code);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 breakpoint->set = 0x11; /* Any nice value but 0 */
1168 return ERROR_OK;
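/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the hardware breakpoint control word built above: a
 * 4-byte ARM breakpoint uses byte-address-select 0xF, a 2-byte Thumb
 * breakpoint selects only the two bytes it covers (address bit 1 picks the
 * upper or lower halfword), and the low bits select matching in both
 * privileged and user modes plus the enable bit.
 */
static uint32_t brp_control_sketch(uint32_t address, int length,
		uint8_t matchmode)
{
	uint8_t byte_addr_select = 0x0F;	/* 4-byte ARM breakpoint */

	if (length == 2)			/* 2-byte Thumb breakpoint */
		byte_addr_select = 3 << (address & 0x02);

	return ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
}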
1171 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1173 int retval;
1174 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1175 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1176 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1178 if (!breakpoint->set)
1180 LOG_WARNING("breakpoint not set");
1181 return ERROR_OK;
1184 if (breakpoint->type == BKPT_HARD)
1186 int brp_i = breakpoint->set - 1;
1187 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1189 LOG_DEBUG("Invalid BRP number in breakpoint");
1190 return ERROR_OK;
1192 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1193 brp_list[brp_i].control, brp_list[brp_i].value);
1194 brp_list[brp_i].used = 0;
1195 brp_list[brp_i].value = 0;
1196 brp_list[brp_i].control = 0;
1197 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1198 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1199 brp_list[brp_i].control);
1200 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1201 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1202 brp_list[brp_i].value);
1204 else
1206 /* restore original instruction (kept in target endianness) */
1207 if (breakpoint->length == 4)
1209 retval = target->type->write_memory(target,
1210 breakpoint->address & 0xFFFFFFFE,
1211 4, 1, breakpoint->orig_instr);
1212 if (retval != ERROR_OK)
1213 return retval;
1215 else
1217 retval = target->type->write_memory(target,
1218 breakpoint->address & 0xFFFFFFFE,
1219 2, 1, breakpoint->orig_instr);
1220 if (retval != ERROR_OK)
1221 return retval;
1224 breakpoint->set = 0;
1226 return ERROR_OK;
1229 static int cortex_a8_add_breakpoint(struct target *target,
1230 struct breakpoint *breakpoint)
1232 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1234 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1236 LOG_INFO("no hardware breakpoint available");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1240 if (breakpoint->type == BKPT_HARD)
1241 cortex_a8->brp_num_available--;
1242 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1244 return ERROR_OK;
1247 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1249 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1251 #if 0
1252 /* It is perfectly possible to remove breakpoints while the target is running */
1253 if (target->state != TARGET_HALTED)
1255 LOG_WARNING("target not halted");
1256 return ERROR_TARGET_NOT_HALTED;
1258 #endif
1260 if (breakpoint->set)
1262 cortex_a8_unset_breakpoint(target, breakpoint);
1263 if (breakpoint->type == BKPT_HARD)
1264 cortex_a8->brp_num_available++ ;
1268 return ERROR_OK;
1274  * Cortex-A8 Reset functions
1277 static int cortex_a8_assert_reset(struct target *target)
1279 struct armv7a_common *armv7a = target_to_armv7a(target);
1281 LOG_DEBUG(" ");
1283 /* FIXME when halt is requested, make it work somehow... */
1285 /* Issue some kind of warm reset. */
1286 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1287 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1288 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1289 /* REVISIT handle "pulls" cases, if there's
1290 * hardware that needs them to work.
1292 jtag_add_reset(0, 1);
1293 } else {
1294 LOG_ERROR("%s: how to reset?", target_name(target));
1295 return ERROR_FAIL;
1298 /* registers are now invalid */
1299 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1301 target->state = TARGET_RESET;
1303 return ERROR_OK;
1306 static int cortex_a8_deassert_reset(struct target *target)
1308 int retval;
1310 LOG_DEBUG(" ");
1312 /* be certain SRST is off */
1313 jtag_add_reset(0, 0);
1315 retval = cortex_a8_poll(target);
1317 if (target->reset_halt) {
1318 if (target->state != TARGET_HALTED) {
1319 LOG_WARNING("%s: ran after reset and before halt ...",
1320 target_name(target));
1321 if ((retval = target_halt(target)) != ERROR_OK)
1322 return retval;
1326 return ERROR_OK;
1330 * Cortex-A8 Memory access
1332  * This is the same as for Cortex-M3, but we must also use the correct
1333  * AP number for every access.
1336 static int cortex_a8_read_phys_memory(struct target *target,
1337 uint32_t address, uint32_t size,
1338 uint32_t count, uint8_t *buffer)
1340 struct armv7a_common *armv7a = target_to_armv7a(target);
1341 struct adiv5_dap *swjdp = &armv7a->dap;
1342 int retval = ERROR_INVALID_ARGUMENTS;
1344 /* cortex_a8 handles unaligned memory access */
1346 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1347 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1348 if (count && buffer) {
1349 switch (size) {
1350 case 4:
1351 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1352 break;
1353 case 2:
1354 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1355 break;
1356 case 1:
1357 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1358 break;
1362 return retval;
1365 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1366 uint32_t size, uint32_t count, uint8_t *buffer)
1368 int enabled = 0;
1369 uint32_t virt, phys;
1371 /* cortex_a8 handles unaligned memory access */
1373 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1374 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1375 cortex_a8_mmu(target, &enabled);
1376 if(enabled)
1378 virt = address;
1379 cortex_a8_virt2phys(target, virt, &phys);
1380 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1381 address = phys;
1384 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1387 static int cortex_a8_write_phys_memory(struct target *target,
1388 uint32_t address, uint32_t size,
1389 uint32_t count, uint8_t *buffer)
1391 struct armv7a_common *armv7a = target_to_armv7a(target);
1392 struct adiv5_dap *swjdp = &armv7a->dap;
1393 int retval = ERROR_INVALID_ARGUMENTS;
1395 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1397 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1398 if (count && buffer) {
1399 switch (size) {
1400 case 4:
1401 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1402 break;
1403 case 2:
1404 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1405 break;
1406 case 1:
1407 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1408 break;
1412 /* REVISIT this op is generic ARMv7-A/R stuff */
1413 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1415 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1417 retval = dpm->prepare(dpm);
1418 if (retval != ERROR_OK)
1419 return retval;
1421 /* The Cache handling will NOT work with MMU active, the
1422 * wrong addresses will be invalidated!
1424 * For both ICache and DCache, walk all cache lines in the
1425 * address range. Cortex-A8 has fixed 64 byte line length.
1427 * REVISIT per ARMv7, these may trigger watchpoints ...
1430 /* invalidate I-Cache */
1431 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1433 /* ICIMVAU - Invalidate Cache single entry
1434 * with MVA to PoU
1435 * MCR p15, 0, r0, c7, c5, 1
1437 for (uint32_t cacheline = address;
1438 cacheline < address + size * count;
1439 cacheline += 64) {
1440 retval = dpm->instr_write_data_r0(dpm,
1441 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1442 cacheline);
1446 /* invalidate D-Cache */
1447 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1449 /* DCIMVAC - Invalidate data Cache line
1450 * with MVA to PoC
1451 * MCR p15, 0, r0, c7, c6, 1
1453 for (uint32_t cacheline = address;
1454 cacheline < address + size * count;
1455 cacheline += 64) {
1456 retval = dpm->instr_write_data_r0(dpm,
1457 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1458 cacheline);
1462 /* (void) */ dpm->finish(dpm);
1465 return retval;
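/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) related to the cache maintenance loops above: with the
 * Cortex-A8's fixed 64-byte line size, this computes how many cache lines
 * the written range [address, address + length) spans, i.e. how many
 * invalidate-by-MVA operations are needed to cover it.
 */
static unsigned cache_lines_spanned_sketch(uint32_t address, uint32_t length)
{
	uint32_t first_line = address & ~(uint32_t)63;
	uint32_t last_line = (address + length - 1) & ~(uint32_t)63;

	return (unsigned)((last_line - first_line) / 64 + 1);
}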
1468 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1469 uint32_t size, uint32_t count, uint8_t *buffer)
1471 int enabled = 0;
1472 uint32_t virt, phys;
1474 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1476 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1477 cortex_a8_mmu(target, &enabled);
1478 if(enabled)
1480 virt = address;
1481 cortex_a8_virt2phys(target, virt, &phys);
1482 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1483 address = phys;
1486 return cortex_a8_write_phys_memory(target, address, size,
1487 count, buffer);
1490 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1491 uint32_t count, uint8_t *buffer)
1493 return cortex_a8_write_memory(target, address, 4, count, buffer);
1497 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1499 #if 0
1500 u16 dcrdr;
1502 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1503 *ctrl = (uint8_t)dcrdr;
1504 *value = (uint8_t)(dcrdr >> 8);
1506 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1508 /* write ack back to software dcc register
1509 * signify we have read data */
1510 if (dcrdr & (1 << 0))
1512 dcrdr = 0;
1513 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1515 #endif
1516 return ERROR_OK;
1520 static int cortex_a8_handle_target_request(void *priv)
1522 struct target *target = priv;
1523 struct armv7a_common *armv7a = target_to_armv7a(target);
1524 struct adiv5_dap *swjdp = &armv7a->dap;
1526 if (!target_was_examined(target))
1527 return ERROR_OK;
1528 if (!target->dbg_msg_enabled)
1529 return ERROR_OK;
1531 if (target->state == TARGET_RUNNING)
1533 uint8_t data = 0;
1534 uint8_t ctrl = 0;
1536 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1538 /* check if we have data */
1539 if (ctrl & (1 << 0))
1541 uint32_t request;
1543 /* we assume target is quick enough */
1544 request = data;
1545 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1546 request |= (data << 8);
1547 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1548 request |= (data << 16);
1549 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1550 request |= (data << 24);
1551 target_request(target, request);
1555 return ERROR_OK;
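/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the request assembly above: the 32-bit target_request
 * word is rebuilt least-significant byte first from four consecutive DCC
 * reads.
 */
static uint32_t dcc_request_sketch(const uint8_t byte[4])
{
	return (uint32_t)byte[0]
		| ((uint32_t)byte[1] << 8)
		| ((uint32_t)byte[2] << 16)
		| ((uint32_t)byte[3] << 24);
}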
1559 * Cortex-A8 target information and configuration
1562 static int cortex_a8_examine_first(struct target *target)
1564 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1565 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1566 struct adiv5_dap *swjdp = &armv7a->dap;
1567 int i;
1568 int retval = ERROR_OK;
1569 uint32_t didr, ctypr, ttypr, cpuid;
1571 /* stop assuming this is an OMAP! */
1572 LOG_DEBUG("TODO - autoconfigure");
1574 /* Here we shall insert a proper ROM Table scan */
1575 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1577 /* We do one extra read to ensure DAP is configured,
1578 * we call ahbap_debugport_init(swjdp) instead
1580 retval = ahbap_debugport_init(swjdp);
1581 if (retval != ERROR_OK)
1582 return retval;
1584 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1585 if (retval != ERROR_OK)
1586 return retval;
1588 if ((retval = mem_ap_read_atomic_u32(swjdp,
1589 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1591 LOG_DEBUG("Examine %s failed", "CPUID");
1592 return retval;
1595 if ((retval = mem_ap_read_atomic_u32(swjdp,
1596 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1598 LOG_DEBUG("Examine %s failed", "CTYPR");
1599 return retval;
1602 if ((retval = mem_ap_read_atomic_u32(swjdp,
1603 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1605 LOG_DEBUG("Examine %s failed", "TTYPR");
1606 return retval;
1609 if ((retval = mem_ap_read_atomic_u32(swjdp,
1610 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1612 LOG_DEBUG("Examine %s failed", "DIDR");
1613 return retval;
1616 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1617 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1618 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1619 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1621 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1622 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1623 if (retval != ERROR_OK)
1624 return retval;
1626 /* Setup Breakpoint Register Pairs */
1627 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1628 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1629 cortex_a8->brp_num_available = cortex_a8->brp_num;
1630 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1631 // cortex_a8->brb_enabled = ????;
1632 for (i = 0; i < cortex_a8->brp_num; i++)
1634 cortex_a8->brp_list[i].used = 0;
1635 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1636 cortex_a8->brp_list[i].type = BRP_NORMAL;
1637 else
1638 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1639 cortex_a8->brp_list[i].value = 0;
1640 cortex_a8->brp_list[i].control = 0;
1641 cortex_a8->brp_list[i].BRPn = i;
1644 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1646 target_set_examined(target);
1647 return ERROR_OK;
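/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the DIDR decoding above: bits [27:24] encode the number
 * of breakpoint register pairs minus one, bits [23:20] the number of
 * context-matching pairs minus one; the remaining pairs are plain
 * address-match breakpoints.
 */
static void didr_brp_sketch(uint32_t didr, int *brp_num,
		int *brp_num_context, int *brp_num_normal)
{
	*brp_num = ((didr >> 24) & 0x0F) + 1;
	*brp_num_context = ((didr >> 20) & 0x0F) + 1;
	*brp_num_normal = *brp_num - *brp_num_context;
}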
1650 static int cortex_a8_examine(struct target *target)
1652 int retval = ERROR_OK;
1654 /* don't re-probe hardware after each reset */
1655 if (!target_was_examined(target))
1656 retval = cortex_a8_examine_first(target);
1658 /* Configure core debug access */
1659 if (retval == ERROR_OK)
1660 retval = cortex_a8_init_debug_access(target);
1662 return retval;
1666 * Cortex-A8 target creation and initialization
1669 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1670 struct target *target)
1672 /* examine_first() does a bunch of this */
1673 return ERROR_OK;
1676 static int cortex_a8_init_arch_info(struct target *target,
1677 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1679 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1680 struct arm *armv4_5 = &armv7a->armv4_5_common;
1681 struct adiv5_dap *dap = &armv7a->dap;
1683 armv7a->armv4_5_common.dap = dap;
1685 /* Setup struct cortex_a8_common */
1686 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1687 armv4_5->arch_info = armv7a;
1689 /* prepare JTAG information for the new target */
1690 cortex_a8->jtag_info.tap = tap;
1691 cortex_a8->jtag_info.scann_size = 4;
1693 /* Leave (only) generic DAP stuff for debugport_init() */
1694 dap->jtag_info = &cortex_a8->jtag_info;
1695 dap->memaccess_tck = 80;
1697 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1698 dap->tar_autoincr_block = (1 << 10);
1700 cortex_a8->fast_reg_read = 0;
1702 /* Set default value */
1703 cortex_a8->current_address_mode = ARM_MODE_ANY;
1705 /* register arch-specific functions */
1706 armv7a->examine_debug_reason = NULL;
1708 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1710 armv7a->pre_restore_context = NULL;
1711 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1712 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1713 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1714 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1715 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1716 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1717 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1718 armv7a->armv4_5_mmu.mmu_enabled = 0;
1721 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1723 /* REVISIT v7a setup should be in a v7a-specific routine */
1724 arm_init_arch_info(target, armv4_5);
1725 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1727 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1729 return ERROR_OK;
1732 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1734 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1736 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1738 return ERROR_OK;
1741 static uint32_t cortex_a8_get_ttb(struct target *target)
1743 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1744 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1745 uint32_t ttb = 0, retval = ERROR_OK;
1747 /* current_address_mode is set inside cortex_a8_virt2phys(),
1748 where we can determine whether the address belongs to user or kernel space */
1749 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1751 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1752 retval = armv7a->armv4_5_common.mrc(target, 15,
1753 0, 1, /* op1, op2 */
1754 2, 0, /* CRn, CRm */
1755 &ttb);
1757 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1759 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1760 retval = armv7a->armv4_5_common.mrc(target, 15,
1761 0, 0, /* op1, op2 */
1762 2, 0, /* CRn, CRm */
1763 &ttb);
1765 /* we don't know whose address this is: user or kernel;
1766 we assume that if we are in kernel mode the
1767 address belongs to the kernel, and if in user mode
1768 it belongs to user space */
1769 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1771 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1772 retval = armv7a->armv4_5_common.mrc(target, 15,
1773 0, 1, /* op1, op2 */
1774 2, 0, /* CRn, CRm */
1775 &ttb);
1777 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1779 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1780 retval = armv7a->armv4_5_common.mrc(target, 15,
1781 0, 0, /* op1, op2 */
1782 2, 0, /* CRn, CRm */
1783 &ttb);
1785 /* finally, we don't know which TTB to use: user or kernel */
1786 else
1787 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1789 ttb &= 0xffffc000;
1791 return ttb;
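/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file). The 0xffffc000 mask above keeps the 16 KB-aligned
 * first-level translation table base (this assumes TTBCR.N = 0, which is
 * what that mask implies); the first-level descriptor for a virtual address
 * is then found by indexing that table with VA[31:20].
 */
static uint32_t first_level_desc_addr_sketch(uint32_t ttb, uint32_t va)
{
	uint32_t table_base = ttb & 0xffffc000;	/* as masked above */
	uint32_t index = va >> 20;		/* one descriptor per 1 MB section */

	return table_base | (index << 2);	/* 4 bytes per descriptor */
}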
1794 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1795 int d_u_cache, int i_cache)
1797 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1798 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1799 uint32_t cp15_control;
1801 /* read cp15 control register */
1802 armv7a->armv4_5_common.mrc(target, 15,
1803 0, 0, /* op1, op2 */
1804 1, 0, /* CRn, CRm */
1805 &cp15_control);
1808 if (mmu)
1809 cp15_control &= ~0x1U;
1811 if (d_u_cache)
1812 cp15_control &= ~0x4U;
1814 if (i_cache)
1815 cp15_control &= ~0x1000U;
1817 armv7a->armv4_5_common.mcr(target, 15,
1818 0, 0, /* op1, op2 */
1819 1, 0, /* CRn, CRm */
1820 cp15_control);
1823 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1824 int d_u_cache, int i_cache)
1826 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1827 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1828 uint32_t cp15_control;
1830 /* read cp15 control register */
1831 armv7a->armv4_5_common.mrc(target, 15,
1832 0, 0, /* op1, op2 */
1833 1, 0, /* CRn, CRm */
1834 &cp15_control);
1836 if (mmu)
1837 cp15_control |= 0x1U;
1839 if (d_u_cache)
1840 cp15_control |= 0x4U;
1842 if (i_cache)
1843 cp15_control |= 0x1000U;
1845 armv7a->armv4_5_common.mcr(target, 15,
1846 0, 0, /* op1, op2 */
1847 1, 0, /* CRn, CRm */
1848 cp15_control);
1852 static int cortex_a8_mmu(struct target *target, int *enabled)
1854 if (target->state != TARGET_HALTED) {
1855 LOG_ERROR("%s: target not halted", __func__);
1856 return ERROR_TARGET_INVALID;
1859 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1860 return ERROR_OK;
1863 static int cortex_a8_virt2phys(struct target *target,
1864 uint32_t virt, uint32_t *phys)
1866 uint32_t cb;
1867 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1868 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1869 struct armv7a_common *armv7a = target_to_armv7a(target);
1871 /* We assume that the virtual address space is split
1872 between user and kernel in the Linux style:
1873 0x00000000-0xbfffffff - User space
1874 0xc0000000-0xffffffff - Kernel space */
1875 if( virt < 0xc0000000 ) /* Linux user space */
1876 cortex_a8->current_address_mode = ARM_MODE_USR;
1877 else /* Linux kernel */
1878 cortex_a8->current_address_mode = ARM_MODE_SVC;
1879 uint32_t ret;
1880 int retval = armv4_5_mmu_translate_va(target,
1881 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1882 if (retval != ERROR_OK)
1883 return retval;
1884 /* Reset the flag. We don't want someone else to use it by mistake */
1885 cortex_a8->current_address_mode = ARM_MODE_ANY;
1887 *phys = ret;
1888 return ERROR_OK;
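/* Editor's note: illustrative, self-contained sketch (not part of the
 * original file) of the address-space split assumed above: anything below
 * 0xc0000000 is treated as a Linux user-space address, everything else as
 * kernel space, and that choice drives which TTB cortex_a8_get_ttb() reads.
 */
static int is_kernel_address_sketch(uint32_t virt)
{
	return virt >= 0xc0000000u;
}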
1891 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1893 struct target *target = get_current_target(CMD_CTX);
1894 struct armv7a_common *armv7a = target_to_armv7a(target);
1896 return armv4_5_handle_cache_info_command(CMD_CTX,
1897 &armv7a->armv4_5_mmu.armv4_5_cache);
1901 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1903 struct target *target = get_current_target(CMD_CTX);
1904 if (!target_was_examined(target))
1906 LOG_ERROR("target not examined yet");
1907 return ERROR_FAIL;
1910 return cortex_a8_init_debug_access(target);
1913 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1915 .name = "cache_info",
1916 .handler = cortex_a8_handle_cache_info_command,
1917 .mode = COMMAND_EXEC,
1918 .help = "display information about target caches",
1921 .name = "dbginit",
1922 .handler = cortex_a8_handle_dbginit_command,
1923 .mode = COMMAND_EXEC,
1924 .help = "Initialize core debug",
1926 COMMAND_REGISTRATION_DONE
1928 static const struct command_registration cortex_a8_command_handlers[] = {
1930 .chain = arm_command_handlers,
1933 .chain = armv7a_command_handlers,
1936 .name = "cortex_a8",
1937 .mode = COMMAND_ANY,
1938 .help = "Cortex-A8 command group",
1939 .chain = cortex_a8_exec_command_handlers,
1941 COMMAND_REGISTRATION_DONE
1944 struct target_type cortexa8_target = {
1945 .name = "cortex_a8",
1947 .poll = cortex_a8_poll,
1948 .arch_state = armv7a_arch_state,
1950 .target_request_data = NULL,
1952 .halt = cortex_a8_halt,
1953 .resume = cortex_a8_resume,
1954 .step = cortex_a8_step,
1956 .assert_reset = cortex_a8_assert_reset,
1957 .deassert_reset = cortex_a8_deassert_reset,
1958 .soft_reset_halt = NULL,
1960 /* REVISIT allow exporting VFP3 registers ... */
1961 .get_gdb_reg_list = arm_get_gdb_reg_list,
1963 .read_memory = cortex_a8_read_memory,
1964 .write_memory = cortex_a8_write_memory,
1965 .bulk_write_memory = cortex_a8_bulk_write_memory,
1967 .checksum_memory = arm_checksum_memory,
1968 .blank_check_memory = arm_blank_check_memory,
1970 .run_algorithm = armv4_5_run_algorithm,
1972 .add_breakpoint = cortex_a8_add_breakpoint,
1973 .remove_breakpoint = cortex_a8_remove_breakpoint,
1974 .add_watchpoint = NULL,
1975 .remove_watchpoint = NULL,
1977 .commands = cortex_a8_command_handlers,
1978 .target_create = cortex_a8_target_create,
1979 .init_target = cortex_a8_init_target,
1980 .examine = cortex_a8_examine,
1982 .read_phys_memory = cortex_a8_read_phys_memory,
1983 .write_phys_memory = cortex_a8_write_phys_memory,
1984 .mmu = cortex_a8_mmu,
1985 .virt2phys = cortex_a8_virt2phys,