src/target/cortex_a9.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
47 static int cortex_a9_poll(struct target *target);
48 static int cortex_a9_debug_entry(struct target *target);
49 static int cortex_a9_restore_context(struct target *target, bool bpwp);
50 static int cortex_a9_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a9_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a9_mmu(struct target *target, int *enabled);
59 static int cortex_a9_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
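/* With this numbering, swjdp_memoryap (the AHB-AP) is used for bulk memory
 * access and swjdp_debugap (the APB-AP) for the memory-mapped CoreSight debug
 * registers at armv7a->debug_base. These indices match OMAP3/OMAP4-class
 * parts; on other SoCs the AP assignment may differ (hence the FIXME above). */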
78 * Cortex-A9 basic debug access; very low level, assumes state is saved
80 static int cortex_a9_init_debug_access(struct target *target)
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 int retval;
85 uint32_t dummy;
87 LOG_DEBUG(" ");
89 /* Unlocking the debug registers for modification */
90 /* The debugport might be uninitialised so try twice */
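	/* 0xC5ACCE55 is the CoreSight Lock Access Register key; writing it to
	 * CPUDBG_LOCKACCESS removes the software lock on the debug registers so
	 * the following memory-mapped debug accesses take effect. */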
91 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
92 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
93 if (retval != ERROR_OK)
95 /* try again */
96 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
97 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
100 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
108 armv7a->debug_base + CPUDBG_PRSR, &dummy);
109 if (retval != ERROR_OK)
110 return retval;
112 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114 /* Resync breakpoint registers */
116 /* Since this is likely called from init or reset, update target state information */
117 return cortex_a9_poll(target);
120 /* To reduce needless round-trips, pass in a pointer to the current
121 * DSCR value. Initialize it to zero if you just need to know the
122 * value on return from this function; or DSCR_INSTR_COMP if you
123 * happen to know that no instruction is pending.
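/* Usage sketch (this is how the DPM helpers below call it): pass a cached
 * DSCR seeded with DSCR_INSTR_COMP so the initial DSCR read is skipped:
 *
 *   uint32_t dscr = DSCR_INSTR_COMP;
 *   retval = cortex_a9_exec_opcode(target,
 *           ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 */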
125 static int cortex_a9_exec_opcode(struct target *target,
126 uint32_t opcode, uint32_t *dscr_p)
128 uint32_t dscr;
129 int retval;
130 struct armv7a_common *armv7a = target_to_armv7a(target);
131 struct adiv5_dap *swjdp = &armv7a->dap;
133 dscr = dscr_p ? *dscr_p : 0;
135 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137 /* Wait for InstrCompl bit to be set */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_INSTR_COMP) == 0)
141 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
148 if (timeval_ms() > then + 1000)
150 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
151 return ERROR_FAIL;
155 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
156 armv7a->debug_base + CPUDBG_ITR, opcode);
157 if (retval != ERROR_OK)
158 return retval;
160 then = timeval_ms();
163 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
164 armv7a->debug_base + CPUDBG_DSCR, &dscr);
165 if (retval != ERROR_OK)
167 LOG_ERROR("Could not read DSCR register");
168 return retval;
170 if (timeval_ms() > then + 1000)
172 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
173 return ERROR_FAIL;
176 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
178 if (dscr_p)
179 *dscr_p = dscr;
181 return retval;
184 /**************************************************************************
185 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
186 This can cause problems when the MMU is active.
187 **************************************************************************/
188 static int cortex_a9_read_regs_through_mem(struct target *target, uint32_t address,
189 uint32_t * regfile)
191 int retval = ERROR_OK;
192 struct armv7a_common *armv7a = target_to_armv7a(target);
193 struct adiv5_dap *swjdp = &armv7a->dap;
195 retval = cortex_a9_dap_read_coreregister_u32(target, regfile, 0);
196 if (retval != ERROR_OK)
197 return retval;
198 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
199 if (retval != ERROR_OK)
200 return retval;
201 retval = cortex_a9_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
202 if (retval != ERROR_OK)
203 return retval;
205 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
206 (uint8_t *)(&regfile[1]), 4*15, address);
208 return retval;
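/* The trick above: r0 is first saved via the DCC, then loaded with the
 * work-area address; STMIA with register mask 0xFFFE stores r1-r15 to that
 * buffer, which is then read back in one burst over the AHB-AP. regfile[0]
 * holds the value captured before r0 was clobbered. */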
211 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
212 uint32_t *value, int regnum)
214 int retval = ERROR_OK;
215 uint8_t reg = regnum&0xFF;
216 uint32_t dscr = 0;
217 struct armv7a_common *armv7a = target_to_armv7a(target);
218 struct adiv5_dap *swjdp = &armv7a->dap;
220 if (reg > 17)
221 return retval;
223 if (reg < 15)
225 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
226 retval = cortex_a9_exec_opcode(target,
227 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
228 &dscr);
229 if (retval != ERROR_OK)
230 return retval;
232 else if (reg == 15)
234 /* "MOV r0, r15"; then move r0 to DCCTX */
235 retval = cortex_a9_exec_opcode(target, 0xE1A0000F, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 retval = cortex_a9_exec_opcode(target,
239 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
240 &dscr);
241 if (retval != ERROR_OK)
242 return retval;
244 else
246 /* "MRS r0, CPSR" or "MRS r0, SPSR"
247 * then move r0 to DCCTX
249 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
250 if (retval != ERROR_OK)
251 return retval;
252 retval = cortex_a9_exec_opcode(target,
253 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
254 &dscr);
255 if (retval != ERROR_OK)
256 return retval;
259 /* Wait for DTRRXfull then read DTRRTX */
260 long long then = timeval_ms();
261 while ((dscr & DSCR_DTR_TX_FULL) == 0)
263 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
264 armv7a->debug_base + CPUDBG_DSCR, &dscr);
265 if (retval != ERROR_OK)
266 return retval;
267 if (timeval_ms() > then + 1000)
269 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
270 return ERROR_FAIL;
274 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
275 armv7a->debug_base + CPUDBG_DTRTX, value);
276 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
278 return retval;
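/* Read path summary: the core-side MCR to p14,c0,c5 moves the register into
 * the DCC transmit channel, DSCR.DTRTXfull is polled, and the value is then
 * fetched from CPUDBG_DTRTX over the APB-AP. */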
281 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
282 uint32_t value, int regnum)
284 int retval = ERROR_OK;
285 uint8_t Rd = regnum&0xFF;
286 uint32_t dscr;
287 struct armv7a_common *armv7a = target_to_armv7a(target);
288 struct adiv5_dap *swjdp = &armv7a->dap;
290 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
292 /* Check that DCCRX is not full */
293 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
294 armv7a->debug_base + CPUDBG_DSCR, &dscr);
295 if (retval != ERROR_OK)
296 return retval;
297 if (dscr & DSCR_DTR_RX_FULL)
299 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
300 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
301 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
302 &dscr);
303 if (retval != ERROR_OK)
304 return retval;
307 if (Rd > 17)
308 return retval;
310 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
311 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
312 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
313 armv7a->debug_base + CPUDBG_DTRRX, value);
314 if (retval != ERROR_OK)
315 return retval;
317 if (Rd < 15)
319 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
320 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
321 &dscr);
322 if (retval != ERROR_OK)
323 return retval;
325 else if (Rd == 15)
327 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
328 * then "mov r15, r0"
330 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
331 &dscr);
332 if (retval != ERROR_OK)
333 return retval;
334 retval = cortex_a9_exec_opcode(target, 0xE1A0F000, &dscr);
335 if (retval != ERROR_OK)
336 return retval;
338 else
340 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
341 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
343 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
344 &dscr);
345 if (retval != ERROR_OK)
346 return retval;
347 retval = cortex_a9_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
348 &dscr);
349 if (retval != ERROR_OK)
350 return retval;
352 /* "Prefetch flush" after modifying execution status in CPSR */
353 if (Rd == 16)
355 retval = cortex_a9_exec_opcode(target,
356 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
357 &dscr);
358 if (retval != ERROR_OK)
359 return retval;
363 return retval;
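/* Write path summary: the value is placed in CPUDBG_DTRRX over the APB-AP,
 * the core-side MRC from p14,c0,c5 pulls it into Rd, and a CP15 "prefetch
 * flush" (c7,c5,4) is issued when the CPSR was modified. */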
366 /* Write to memory mapped registers directly with no cache or mmu handling */
367 static int cortex_a9_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
369 int retval;
370 struct armv7a_common *armv7a = target_to_armv7a(target);
371 struct adiv5_dap *swjdp = &armv7a->dap;
373 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
375 return retval;
379 * Cortex-A9 implementation of Debug Programmer's Model
381 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
382 * so there's no need to poll for it before executing an instruction.
384 * NOTE that in several of these cases the "stall" mode might be useful.
385 * It'd let us queue a few operations together... prepare/finish might
386 * be the places to enable/disable that mode.
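/* The callbacks below implement the generic struct arm_dpm interface on top
 * of the DCC primitives above; arm_dpm.c uses them to save/restore registers
 * and to program the breakpoint/watchpoint units. */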
389 static inline struct cortex_a9_common *dpm_to_a9(struct arm_dpm *dpm)
391 return container_of(dpm, struct cortex_a9_common, armv7a_common.dpm);
394 static int cortex_a9_write_dcc(struct cortex_a9_common *a9, uint32_t data)
396 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
397 return mem_ap_sel_write_u32(&a9->armv7a_common.dap, swjdp_debugap,
398 a9->armv7a_common.debug_base + CPUDBG_DTRRX, data);
401 static int cortex_a9_read_dcc(struct cortex_a9_common *a9, uint32_t *data,
402 uint32_t *dscr_p)
404 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
405 uint32_t dscr = DSCR_INSTR_COMP;
406 int retval;
408 if (dscr_p)
409 dscr = *dscr_p;
411 /* Wait for DTRRXfull */
412 long long then = timeval_ms();
413 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
414 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
415 a9->armv7a_common.debug_base + CPUDBG_DSCR,
416 &dscr);
417 if (retval != ERROR_OK)
418 return retval;
419 if (timeval_ms() > then + 1000)
421 LOG_ERROR("Timeout waiting for read dcc");
422 return ERROR_FAIL;
426 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
427 a9->armv7a_common.debug_base + CPUDBG_DTRTX, data);
428 if (retval != ERROR_OK)
429 return retval;
430 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
432 if (dscr_p)
433 *dscr_p = dscr;
435 return retval;
438 static int cortex_a9_dpm_prepare(struct arm_dpm *dpm)
440 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
441 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
442 uint32_t dscr;
443 int retval;
445 /* set up invariant: INSTR_COMP is set after every DPM operation */
446 long long then = timeval_ms();
447 for (;;)
449 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
450 a9->armv7a_common.debug_base + CPUDBG_DSCR,
451 &dscr);
452 if (retval != ERROR_OK)
453 return retval;
454 if ((dscr & DSCR_INSTR_COMP) != 0)
455 break;
456 if (timeval_ms() > then + 1000)
458 LOG_ERROR("Timeout waiting for dpm prepare");
459 return ERROR_FAIL;
463 /* this "should never happen" ... */
464 if (dscr & DSCR_DTR_RX_FULL) {
465 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
466 /* Clear DCCRX */
467 retval = cortex_a9_exec_opcode(
468 a9->armv7a_common.armv4_5_common.target,
469 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
470 &dscr);
471 if (retval != ERROR_OK)
472 return retval;
475 return retval;
478 static int cortex_a9_dpm_finish(struct arm_dpm *dpm)
480 /* REVISIT what could be done here? */
481 return ERROR_OK;
484 static int cortex_a9_instr_write_data_dcc(struct arm_dpm *dpm,
485 uint32_t opcode, uint32_t data)
487 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
488 int retval;
489 uint32_t dscr = DSCR_INSTR_COMP;
491 retval = cortex_a9_write_dcc(a9, data);
492 if (retval != ERROR_OK)
493 return retval;
495 return cortex_a9_exec_opcode(
496 a9->armv7a_common.armv4_5_common.target,
497 opcode,
498 &dscr);
501 static int cortex_a9_instr_write_data_r0(struct arm_dpm *dpm,
502 uint32_t opcode, uint32_t data)
504 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
505 uint32_t dscr = DSCR_INSTR_COMP;
506 int retval;
508 retval = cortex_a9_write_dcc(a9, data);
509 if (retval != ERROR_OK)
510 return retval;
512 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
513 retval = cortex_a9_exec_opcode(
514 a9->armv7a_common.armv4_5_common.target,
515 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
520 /* then the opcode, taking data from R0 */
521 retval = cortex_a9_exec_opcode(
522 a9->armv7a_common.armv4_5_common.target,
523 opcode,
524 &dscr);
526 return retval;
529 static int cortex_a9_instr_cpsr_sync(struct arm_dpm *dpm)
531 struct target *target = dpm->arm->target;
532 uint32_t dscr = DSCR_INSTR_COMP;
534 /* "Prefetch flush" after modifying execution status in CPSR */
535 return cortex_a9_exec_opcode(target,
536 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
537 &dscr);
540 static int cortex_a9_instr_read_data_dcc(struct arm_dpm *dpm,
541 uint32_t opcode, uint32_t *data)
543 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
544 int retval;
545 uint32_t dscr = DSCR_INSTR_COMP;
547 /* the opcode, writing data to DCC */
548 retval = cortex_a9_exec_opcode(
549 a9->armv7a_common.armv4_5_common.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
555 return cortex_a9_read_dcc(a9, data, &dscr);
559 static int cortex_a9_instr_read_data_r0(struct arm_dpm *dpm,
560 uint32_t opcode, uint32_t *data)
562 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
563 uint32_t dscr = DSCR_INSTR_COMP;
564 int retval;
566 /* the opcode, writing data to R0 */
567 retval = cortex_a9_exec_opcode(
568 a9->armv7a_common.armv4_5_common.target,
569 opcode,
570 &dscr);
571 if (retval != ERROR_OK)
572 return retval;
574 /* write R0 to DCC */
575 retval = cortex_a9_exec_opcode(
576 a9->armv7a_common.armv4_5_common.target,
577 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
578 &dscr);
579 if (retval != ERROR_OK)
580 return retval;
582 return cortex_a9_read_dcc(a9, data, &dscr);
585 static int cortex_a9_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
586 uint32_t addr, uint32_t control)
588 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
589 uint32_t vr = a9->armv7a_common.debug_base;
590 uint32_t cr = a9->armv7a_common.debug_base;
591 int retval;
593 switch (index_t) {
594 case 0 ... 15: /* breakpoints */
595 vr += CPUDBG_BVR_BASE;
596 cr += CPUDBG_BCR_BASE;
597 break;
598 case 16 ... 31: /* watchpoints */
599 vr += CPUDBG_WVR_BASE;
600 cr += CPUDBG_WCR_BASE;
601 index_t -= 16;
602 break;
603 default:
604 return ERROR_FAIL;
606 vr += 4 * index_t;
607 cr += 4 * index_t;
609 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
610 (unsigned) vr, (unsigned) cr);
612 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
613 vr, addr);
614 if (retval != ERROR_OK)
615 return retval;
616 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
617 cr, control);
618 return retval;
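/* Per the ARMv7 debug register map, breakpoint value/control pairs live at
 * CPUDBG_BVR_BASE/CPUDBG_BCR_BASE + 4*n and watchpoint pairs at
 * CPUDBG_WVR_BASE/CPUDBG_WCR_BASE + 4*n; indices 16..31 passed in by the DPM
 * layer are therefore rebased onto the watchpoint bank. */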
621 static int cortex_a9_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
623 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
624 uint32_t cr;
626 switch (index_t) {
627 case 0 ... 15:
628 cr = a9->armv7a_common.debug_base + CPUDBG_BCR_BASE;
629 break;
630 case 16 ... 31:
631 cr = a9->armv7a_common.debug_base + CPUDBG_WCR_BASE;
632 index_t -= 16;
633 break;
634 default:
635 return ERROR_FAIL;
637 cr += 4 * index_t;
639 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr);
641 /* clear control register */
642 return cortex_a9_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
645 static int cortex_a9_dpm_setup(struct cortex_a9_common *a9, uint32_t didr)
647 struct arm_dpm *dpm = &a9->armv7a_common.dpm;
648 int retval;
650 dpm->arm = &a9->armv7a_common.armv4_5_common;
651 dpm->didr = didr;
653 dpm->prepare = cortex_a9_dpm_prepare;
654 dpm->finish = cortex_a9_dpm_finish;
656 dpm->instr_write_data_dcc = cortex_a9_instr_write_data_dcc;
657 dpm->instr_write_data_r0 = cortex_a9_instr_write_data_r0;
658 dpm->instr_cpsr_sync = cortex_a9_instr_cpsr_sync;
660 dpm->instr_read_data_dcc = cortex_a9_instr_read_data_dcc;
661 dpm->instr_read_data_r0 = cortex_a9_instr_read_data_r0;
663 dpm->bpwp_enable = cortex_a9_bpwp_enable;
664 dpm->bpwp_disable = cortex_a9_bpwp_disable;
666 retval = arm_dpm_setup(dpm);
667 if (retval == ERROR_OK)
668 retval = arm_dpm_initialize(dpm);
670 return retval;
675 * Cortex-A9 Run control
678 static int cortex_a9_poll(struct target *target)
680 int retval = ERROR_OK;
681 uint32_t dscr;
682 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
683 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
684 struct adiv5_dap *swjdp = &armv7a->dap;
685 enum target_state prev_target_state = target->state;
687 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
688 armv7a->debug_base + CPUDBG_DSCR, &dscr);
689 if (retval != ERROR_OK)
691 return retval;
693 cortex_a9->cpudbg_dscr = dscr;
695 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
697 if (prev_target_state != TARGET_HALTED)
699 /* We have a halting debug event */
700 LOG_DEBUG("Target halted");
701 target->state = TARGET_HALTED;
702 if ((prev_target_state == TARGET_RUNNING)
703 || (prev_target_state == TARGET_RESET))
705 retval = cortex_a9_debug_entry(target);
706 if (retval != ERROR_OK)
707 return retval;
709 target_call_event_callbacks(target,
710 TARGET_EVENT_HALTED);
712 if (prev_target_state == TARGET_DEBUG_RUNNING)
714 LOG_DEBUG(" ");
716 retval = cortex_a9_debug_entry(target);
717 if (retval != ERROR_OK)
718 return retval;
720 target_call_event_callbacks(target,
721 TARGET_EVENT_DEBUG_HALTED);
725 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
727 target->state = TARGET_RUNNING;
729 else
731 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
732 target->state = TARGET_UNKNOWN;
735 return retval;
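/* DSCR_RUN_MODE() extracts the "core halted" and "core restarted" DSCR bits:
 * both set means the core sits halted in debug state, "restarted" alone means
 * it is executing, and anything else is reported as TARGET_UNKNOWN. */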
738 static int cortex_a9_halt(struct target *target)
740 int retval = ERROR_OK;
741 uint32_t dscr;
742 struct armv7a_common *armv7a = target_to_armv7a(target);
743 struct adiv5_dap *swjdp = &armv7a->dap;
746 * Tell the core to be halted by writing DRCR with 0x1
747 * and then wait for the core to be halted.
749 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
750 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
751 if (retval != ERROR_OK)
752 return retval;
755 * enter halting debug mode
757 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
758 armv7a->debug_base + CPUDBG_DSCR, &dscr);
759 if (retval != ERROR_OK)
760 return retval;
762 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
763 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
764 if (retval != ERROR_OK)
765 return retval;
767 long long then = timeval_ms();
768 for (;;)
770 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
776 break;
778 if (timeval_ms() > then + 1000)
780 LOG_ERROR("Timeout waiting for halt");
781 return ERROR_FAIL;
785 target->debug_reason = DBG_REASON_DBGRQ;
787 return ERROR_OK;
790 static int cortex_a9_resume(struct target *target, int current,
791 uint32_t address, int handle_breakpoints, int debug_execution)
793 struct armv7a_common *armv7a = target_to_armv7a(target);
794 struct arm *armv4_5 = &armv7a->armv4_5_common;
795 struct adiv5_dap *swjdp = &armv7a->dap;
796 int retval;
798 // struct breakpoint *breakpoint = NULL;
799 uint32_t resume_pc, dscr;
801 if (!debug_execution)
802 target_free_all_working_areas(target);
804 #if 0
805 if (debug_execution)
807 /* Disable interrupts */
808 /* We disable interrupts in the PRIMASK register instead of
809 * masking with C_MASKINTS,
810 * This is probably the same issue as Cortex-M3 Errata 377493:
811 * C_MASKINTS in parallel with disabled interrupts can cause
812 * local faults to not be taken. */
813 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
814 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
815 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
817 /* Make sure we are in Thumb mode */
818 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
819 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
820 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
821 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
823 #endif
825 /* current = 1: continue on current pc, otherwise continue at <address> */
826 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
827 if (!current)
828 resume_pc = address;
830 /* Make sure that the ARMv7 gdb Thumb fixups do not
831 * kill the return address
833 switch (armv4_5->core_state)
835 case ARM_STATE_ARM:
836 resume_pc &= 0xFFFFFFFC;
837 break;
838 case ARM_STATE_THUMB:
839 case ARM_STATE_THUMB_EE:
840 /* When the return address is loaded into PC
841 * bit 0 must be 1 to stay in Thumb state
843 resume_pc |= 0x1;
844 break;
845 case ARM_STATE_JAZELLE:
846 LOG_ERROR("How do I resume into Jazelle state??");
847 return ERROR_FAIL;
849 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
850 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
851 armv4_5->pc->dirty = 1;
852 armv4_5->pc->valid = 1;
854 retval = cortex_a9_restore_context(target, handle_breakpoints);
855 if (retval != ERROR_OK)
856 return retval;
858 #if 0
859 /* the front-end may request us not to handle breakpoints */
860 if (handle_breakpoints)
862 /* Single step past breakpoint at current address */
863 if ((breakpoint = breakpoint_find(target, resume_pc)))
865 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
866 cortex_m3_unset_breakpoint(target, breakpoint);
867 cortex_m3_single_step_core(target);
868 cortex_m3_set_breakpoint(target, breakpoint);
872 #endif
875 * Restart core and wait for it to be started. Clear ITRen and sticky
876 * exception flags: see ARMv7 ARM, C5.9.
878 * REVISIT: for single stepping, we probably want to
879 * disable IRQs by default, with optional override...
882 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
883 armv7a->debug_base + CPUDBG_DSCR, &dscr);
884 if (retval != ERROR_OK)
885 return retval;
887 if ((dscr & DSCR_INSTR_COMP) == 0)
888 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
890 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
891 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
892 if (retval != ERROR_OK)
893 return retval;
895 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
896 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
897 if (retval != ERROR_OK)
898 return retval;
900 long long then = timeval_ms();
901 for (;;)
903 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
904 armv7a->debug_base + CPUDBG_DSCR, &dscr);
905 if (retval != ERROR_OK)
906 return retval;
907 if ((dscr & DSCR_CORE_RESTARTED) != 0)
908 break;
909 if (timeval_ms() > then + 1000)
911 LOG_ERROR("Timeout waiting for resume");
912 return ERROR_FAIL;
916 target->debug_reason = DBG_REASON_NOTHALTED;
917 target->state = TARGET_RUNNING;
919 /* registers are now invalid */
920 register_cache_invalidate(armv4_5->core_cache);
922 if (!debug_execution)
924 target->state = TARGET_RUNNING;
925 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
926 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
928 else
930 target->state = TARGET_DEBUG_RUNNING;
931 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
932 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
935 return ERROR_OK;
938 static int cortex_a9_debug_entry(struct target *target)
940 int i;
941 uint32_t regfile[16], cpsr, dscr;
942 int retval = ERROR_OK;
943 struct working_area *regfile_working_area = NULL;
944 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
945 struct armv7a_common *armv7a = target_to_armv7a(target);
946 struct arm *armv4_5 = &armv7a->armv4_5_common;
947 struct adiv5_dap *swjdp = &armv7a->dap;
948 struct reg *reg;
950 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a9->cpudbg_dscr);
952 /* REVISIT surely we should not re-read DSCR !! */
953 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
954 armv7a->debug_base + CPUDBG_DSCR, &dscr);
955 if (retval != ERROR_OK)
956 return retval;
958 /* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
959 * imprecise data aborts get discarded by issuing a Data
960 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
963 /* Enable the ITR execution once we are in debug mode */
964 dscr |= DSCR_ITR_EN;
965 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
966 armv7a->debug_base + CPUDBG_DSCR, dscr);
967 if (retval != ERROR_OK)
968 return retval;
970 /* Examine debug reason */
971 arm_dpm_report_dscr(&armv7a->dpm, cortex_a9->cpudbg_dscr);
973 /* save address of instruction that triggered the watchpoint? */
974 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
975 uint32_t wfar;
977 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
978 armv7a->debug_base + CPUDBG_WFAR,
979 &wfar);
980 if (retval != ERROR_OK)
981 return retval;
982 arm_dpm_report_wfar(&armv7a->dpm, wfar);
985 /* REVISIT fast_reg_read is never set ... */
987 /* Examine target state and mode */
988 if (cortex_a9->fast_reg_read)
989 target_alloc_working_area(target, 64, &regfile_working_area);
991 /* First load registers accessible through the core debug port */
992 if (!regfile_working_area)
994 retval = arm_dpm_read_current_registers(&armv7a->dpm);
996 else
998 retval = cortex_a9_read_regs_through_mem(target,
999 regfile_working_area->address, regfile);
1001 target_free_working_area(target, regfile_working_area);
1002 if (retval != ERROR_OK)
1004 return retval;
1007 /* read Current PSR */
1008 retval = cortex_a9_dap_read_coreregister_u32(target, &cpsr, 16);
1009 if (retval != ERROR_OK)
1010 return retval;
1012 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1014 arm_set_cpsr(armv4_5, cpsr);
1016 /* update cache */
1017 for (i = 0; i <= ARM_PC; i++)
1019 reg = arm_reg_current(armv4_5, i);
1021 buf_set_u32(reg->value, 0, 32, regfile[i]);
1022 reg->valid = 1;
1023 reg->dirty = 0;
1026 /* Fixup PC Resume Address */
1027 if (cpsr & (1 << 5))
1029 // T bit set for Thumb or ThumbEE state
1030 regfile[ARM_PC] -= 4;
1032 else
1034 // ARM state
1035 regfile[ARM_PC] -= 8;
1038 reg = armv4_5->pc;
1039 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1040 reg->dirty = reg->valid;
1043 #if 0
1044 /* TODO, Move this */
1045 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1046 cortex_a9_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1047 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1049 cortex_a9_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1050 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1052 cortex_a9_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1053 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1054 #endif
1056 /* Are we in an exception handler */
1057 // armv4_5->exception_number = 0;
1058 if (armv7a->post_debug_entry)
1060 retval = armv7a->post_debug_entry(target);
1061 if (retval != ERROR_OK)
1062 return retval;
1065 return retval;
1068 static int cortex_a9_post_debug_entry(struct target *target)
1070 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1071 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1072 int retval;
1074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1075 retval = armv7a->armv4_5_common.mrc(target, 15,
1076 0, 0, /* op1, op2 */
1077 1, 0, /* CRn, CRm */
1078 &cortex_a9->cp15_control_reg);
1079 if (retval != ERROR_OK)
1080 return retval;
1081 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a9->cp15_control_reg);
1083 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1085 uint32_t cache_type_reg;
1087 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1088 retval = armv7a->armv4_5_common.mrc(target, 15,
1089 0, 1, /* op1, op2 */
1090 0, 0, /* CRn, CRm */
1091 &cache_type_reg);
1092 if (retval != ERROR_OK)
1093 return retval;
1094 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1096 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A9 */
1097 armv4_5_identify_cache(cache_type_reg,
1098 &armv7a->armv4_5_mmu.armv4_5_cache);
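	/* SCTLR bit 0 is M (MMU enable), bit 2 is C (data/unified cache enable)
	 * and bit 12 is I (instruction cache enable); mirror them into the
	 * armv4_5_mmu state used by the memory access routines. */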
1101 armv7a->armv4_5_mmu.mmu_enabled =
1102 (cortex_a9->cp15_control_reg & 0x1U) ? 1 : 0;
1103 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1104 (cortex_a9->cp15_control_reg & 0x4U) ? 1 : 0;
1105 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1106 (cortex_a9->cp15_control_reg & 0x1000U) ? 1 : 0;
1108 return ERROR_OK;
1111 static int cortex_a9_step(struct target *target, int current, uint32_t address,
1112 int handle_breakpoints)
1114 struct armv7a_common *armv7a = target_to_armv7a(target);
1115 struct arm *armv4_5 = &armv7a->armv4_5_common;
1116 struct breakpoint *breakpoint = NULL;
1117 struct breakpoint stepbreakpoint;
1118 struct reg *r;
1119 int retval;
1121 if (target->state != TARGET_HALTED)
1123 LOG_WARNING("target not halted");
1124 return ERROR_TARGET_NOT_HALTED;
1127 /* current = 1: continue on current pc, otherwise continue at <address> */
1128 r = armv4_5->pc;
1129 if (!current)
1131 buf_set_u32(r->value, 0, 32, address);
1133 else
1135 address = buf_get_u32(r->value, 0, 32);
1138 /* The front-end may request us not to handle breakpoints.
1139 * But since Cortex-A9 uses a hardware breakpoint for single-stepping,
1140 * we MUST handle breakpoints.
1142 handle_breakpoints = 1;
1143 if (handle_breakpoints) {
1144 breakpoint = breakpoint_find(target, address);
1145 if (breakpoint)
1146 cortex_a9_unset_breakpoint(target, breakpoint);
1149 /* Setup single step breakpoint */
1150 stepbreakpoint.address = address;
1151 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1152 ? 2 : 4;
1153 stepbreakpoint.type = BKPT_HARD;
1154 stepbreakpoint.set = 0;
1156 /* Break on IVA mismatch */
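	/* Matchmode 0x04 programs the BRP for address *mismatch*: the core halts
	 * on the first instruction fetched from any address other than the one
	 * armed below, which is what implements single step on Cortex-A9. */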
1157 cortex_a9_set_breakpoint(target, &stepbreakpoint, 0x04);
1159 target->debug_reason = DBG_REASON_SINGLESTEP;
1161 retval = cortex_a9_resume(target, 1, address, 0, 0);
1162 if (retval != ERROR_OK)
1163 return retval;
1165 long long then = timeval_ms();
1166 while (target->state != TARGET_HALTED)
1168 retval = cortex_a9_poll(target);
1169 if (retval != ERROR_OK)
1170 return retval;
1171 if (timeval_ms() > then + 1000)
1173 LOG_ERROR("timeout waiting for target halt");
1174 return ERROR_FAIL;
1178 cortex_a9_unset_breakpoint(target, &stepbreakpoint);
1180 target->debug_reason = DBG_REASON_BREAKPOINT;
1182 if (breakpoint)
1183 cortex_a9_set_breakpoint(target, breakpoint, 0);
1185 if (target->state == TARGET_HALTED)
1186 LOG_DEBUG("target stepped");
1188 return ERROR_OK;
1191 static int cortex_a9_restore_context(struct target *target, bool bpwp)
1193 struct armv7a_common *armv7a = target_to_armv7a(target);
1195 LOG_DEBUG(" ");
1197 if (armv7a->pre_restore_context)
1198 armv7a->pre_restore_context(target);
1200 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1205 * Cortex-A9 Breakpoint and watchpoint functions
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int cortex_a9_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1212 int retval;
1213 int brp_i=0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1217 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1218 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1220 if (breakpoint->set)
1222 LOG_WARNING("breakpoint already set");
1223 return ERROR_OK;
1226 if (breakpoint->type == BKPT_HARD)
1228 while ((brp_i < cortex_a9->brp_num) && brp_list[brp_i].used)
1229 brp_i++ ;
1230 if (brp_i >= cortex_a9->brp_num)
1232 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1235 breakpoint->set = brp_i + 1;
1236 if (breakpoint->length == 2)
1238 byte_addr_select = (3 << (breakpoint->address & 0x02));
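	/* BCR layout: bits [22:20] match mode, [8:5] byte address select,
	 * bits [2:1] privilege mode control (0b11: match in both privileged and
	 * user modes), bit 0 enable. */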
1240 control = ((matchmode & 0x7) << 20)
1241 | (byte_addr_select << 5)
1242 | (3 << 1) | 1;
1243 brp_list[brp_i].used = 1;
1244 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1245 brp_list[brp_i].control = control;
1246 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1247 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1248 brp_list[brp_i].value);
1249 if (retval != ERROR_OK)
1250 return retval;
1251 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1252 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1253 brp_list[brp_i].control);
1254 if (retval != ERROR_OK)
1255 return retval;
1256 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1257 brp_list[brp_i].control,
1258 brp_list[brp_i].value);
1260 else if (breakpoint->type == BKPT_SOFT)
1262 uint8_t code[4];
1263 if (breakpoint->length == 2)
1265 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1267 else
1269 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1271 retval = target->type->read_memory(target,
1272 breakpoint->address & 0xFFFFFFFE,
1273 breakpoint->length, 1,
1274 breakpoint->orig_instr);
1275 if (retval != ERROR_OK)
1276 return retval;
1277 retval = target->type->write_memory(target,
1278 breakpoint->address & 0xFFFFFFFE,
1279 breakpoint->length, 1, code);
1280 if (retval != ERROR_OK)
1281 return retval;
1282 breakpoint->set = 0x11; /* Any nice value but 0 */
1285 return ERROR_OK;
1288 static int cortex_a9_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1290 int retval;
1291 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1292 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1293 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1295 if (!breakpoint->set)
1297 LOG_WARNING("breakpoint not set");
1298 return ERROR_OK;
1301 if (breakpoint->type == BKPT_HARD)
1303 int brp_i = breakpoint->set - 1;
1304 if ((brp_i < 0) || (brp_i >= cortex_a9->brp_num))
1306 LOG_DEBUG("Invalid BRP number in breakpoint");
1307 return ERROR_OK;
1309 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1310 brp_list[brp_i].control, brp_list[brp_i].value);
1311 brp_list[brp_i].used = 0;
1312 brp_list[brp_i].value = 0;
1313 brp_list[brp_i].control = 0;
1314 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1315 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1316 brp_list[brp_i].control);
1317 if (retval != ERROR_OK)
1318 return retval;
1319 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1320 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1321 brp_list[brp_i].value);
1322 if (retval != ERROR_OK)
1323 return retval;
1325 else
1327 /* restore original instruction (kept in target endianness) */
1328 if (breakpoint->length == 4)
1330 retval = target->type->write_memory(target,
1331 breakpoint->address & 0xFFFFFFFE,
1332 4, 1, breakpoint->orig_instr);
1333 if (retval != ERROR_OK)
1334 return retval;
1336 else
1338 retval = target->type->write_memory(target,
1339 breakpoint->address & 0xFFFFFFFE,
1340 2, 1, breakpoint->orig_instr);
1341 if (retval != ERROR_OK)
1342 return retval;
1345 breakpoint->set = 0;
1347 return ERROR_OK;
1350 static int cortex_a9_add_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint)
1353 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1355 if ((breakpoint->type == BKPT_HARD) && (cortex_a9->brp_num_available < 1))
1357 LOG_INFO("no hardware breakpoint available");
1358 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1361 if (breakpoint->type == BKPT_HARD)
1362 cortex_a9->brp_num_available--;
1364 return cortex_a9_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1367 static int cortex_a9_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1369 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1371 #if 0
1372 /* It is perfectly possible to remove breakpoints while the target is running */
1373 if (target->state != TARGET_HALTED)
1375 LOG_WARNING("target not halted");
1376 return ERROR_TARGET_NOT_HALTED;
1378 #endif
1380 if (breakpoint->set)
1382 cortex_a9_unset_breakpoint(target, breakpoint);
1383 if (breakpoint->type == BKPT_HARD)
1384 cortex_a9->brp_num_available++ ;
1388 return ERROR_OK;
1394 * Cortex-A9 Reset functions
1397 static int cortex_a9_assert_reset(struct target *target)
1399 struct armv7a_common *armv7a = target_to_armv7a(target);
1401 LOG_DEBUG(" ");
1403 /* FIXME when halt is requested, make it work somehow... */
1405 /* Issue some kind of warm reset. */
1406 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1407 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1408 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1409 /* REVISIT handle "pulls" cases, if there's
1410 * hardware that needs them to work.
1412 jtag_add_reset(0, 1);
1413 } else {
1414 LOG_ERROR("%s: how to reset?", target_name(target));
1415 return ERROR_FAIL;
1418 /* registers are now invalid */
1419 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1421 target->state = TARGET_RESET;
1423 return ERROR_OK;
1426 static int cortex_a9_deassert_reset(struct target *target)
1428 int retval;
1430 LOG_DEBUG(" ");
1432 /* be certain SRST is off */
1433 jtag_add_reset(0, 0);
1435 retval = cortex_a9_poll(target);
1436 if (retval != ERROR_OK)
1437 return retval;
1439 if (target->reset_halt) {
1440 if (target->state != TARGET_HALTED) {
1441 LOG_WARNING("%s: ran after reset and before halt ...",
1442 target_name(target));
1443 if ((retval = target_halt(target)) != ERROR_OK)
1444 return retval;
1448 return ERROR_OK;
1452 * Cortex-A9 Memory access
1454 * This is the same as for Cortex-M3, but we must also use the correct
1455 * AP number for every access.
1458 static int cortex_a9_read_phys_memory(struct target *target,
1459 uint32_t address, uint32_t size,
1460 uint32_t count, uint8_t *buffer)
1462 struct armv7a_common *armv7a = target_to_armv7a(target);
1463 struct adiv5_dap *swjdp = &armv7a->dap;
1464 int retval = ERROR_INVALID_ARGUMENTS;
1465 uint8_t apsel = swjdp->apsel;
1467 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1469 if (count && buffer) {
1471 if ( apsel == swjdp_memoryap ) {
1473 /* read memory through AHB-AP */
1475 switch (size) {
1476 case 4:
1477 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1478 buffer, 4 * count, address);
1479 break;
1480 case 2:
1481 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1482 buffer, 2 * count, address);
1483 break;
1484 case 1:
1485 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1486 buffer, count, address);
1487 break;
1490 } else {
1492 /* read memory through APB-AP */
1494 uint32_t saved_r0, saved_r1;
1495 int nbytes = count * size;
1496 uint32_t data;
1497 int enabled = 0;
1499 if (target->state != TARGET_HALTED)
1501 LOG_WARNING("target not halted");
1502 return ERROR_TARGET_NOT_HALTED;
1505 retval = cortex_a9_mmu(target, &enabled);
1506 if (retval != ERROR_OK)
1507 return retval;
1509 if (enabled)
1511 LOG_WARNING("Reading physical memory through APB with MMU enabled is not yet implemented");
1512 return ERROR_TARGET_FAILURE;
1515 /* save registers r0 and r1, we are going to corrupt them */
1516 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
1517 if (retval != ERROR_OK)
1518 return retval;
1520 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
1521 if (retval != ERROR_OK)
1522 return retval;
1524 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
1525 if (retval != ERROR_OK)
1526 return retval;
1528 while (nbytes > 0) {
1530 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1531 retval = cortex_a9_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
1532 if (retval != ERROR_OK)
1533 return retval;
1535 retval = cortex_a9_dap_read_coreregister_u32(target, &data, 1);
1536 if (retval != ERROR_OK)
1537 return retval;
1539 *buffer++ = data;
1540 --nbytes;
1544 /* restore corrupted registers r0 and r1 */
1545 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
1546 if (retval != ERROR_OK)
1547 return retval;
1549 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
1550 if (retval != ERROR_OK)
1551 return retval;
1556 return retval;
1559 static int cortex_a9_read_memory(struct target *target, uint32_t address,
1560 uint32_t size, uint32_t count, uint8_t *buffer)
1562 int enabled = 0;
1563 uint32_t virt, phys;
1564 int retval;
1566 /* cortex_a9 handles unaligned memory access */
1568 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1569 retval = cortex_a9_mmu(target, &enabled);
1570 if (retval != ERROR_OK)
1571 return retval;
1573 if (enabled)
1575 virt = address;
1576 retval = cortex_a9_virt2phys(target, virt, &phys);
1577 if (retval != ERROR_OK)
1578 return retval;
1580 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1581 address = phys;
1584 return cortex_a9_read_phys_memory(target, address, size, count, buffer);
1587 static int cortex_a9_write_phys_memory(struct target *target,
1588 uint32_t address, uint32_t size,
1589 uint32_t count, uint8_t *buffer)
1591 struct armv7a_common *armv7a = target_to_armv7a(target);
1592 struct adiv5_dap *swjdp = &armv7a->dap;
1593 int retval = ERROR_INVALID_ARGUMENTS;
1594 uint8_t apsel = swjdp->apsel;
1596 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1598 if (count && buffer) {
1600 if ( apsel == swjdp_memoryap ) {
1602 /* write memory through AHB-AP */
1604 switch (size) {
1605 case 4:
1606 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1607 buffer, 4 * count, address);
1608 break;
1609 case 2:
1610 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1611 buffer, 2 * count, address);
1612 break;
1613 case 1:
1614 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1615 buffer, count, address);
1616 break;
1619 } else {
1621 /* write memory through APB-AP */
1623 uint32_t saved_r0, saved_r1;
1624 int nbytes = count * size;
1625 uint32_t data;
1626 int enabled = 0;
1628 if (target->state != TARGET_HALTED)
1630 LOG_WARNING("target not halted");
1631 return ERROR_TARGET_NOT_HALTED;
1634 retval = cortex_a9_mmu(target, &enabled);
1635 if (retval != ERROR_OK)
1636 return retval;
1638 if (enabled)
1640 LOG_WARNING("Writing physical memory through APB with MMU enabled is not yet implemented");
1641 return ERROR_TARGET_FAILURE;
1644 /* save registers r0 and r1, we are going to corrupt them */
1645 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
1646 if (retval != ERROR_OK)
1647 return retval;
1649 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
1650 if (retval != ERROR_OK)
1651 return retval;
1653 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
1654 if (retval != ERROR_OK)
1655 return retval;
1657 while (nbytes > 0) {
1659 data = *buffer++;
1661 retval = cortex_a9_dap_write_coreregister_u32(target, data, 1);
1662 if (retval != ERROR_OK)
1663 return retval;
1665 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1666 retval = cortex_a9_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
1667 if (retval != ERROR_OK)
1668 return retval;
1670 --nbytes;
1673 /* restore corrupted registers r0 and r1 */
1674 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
1675 if (retval != ERROR_OK)
1676 return retval;
1678 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
1679 if (retval != ERROR_OK)
1680 return retval;
1682 /* we can return here without invalidating D/I-cache because */
1683 /* access through APB maintains cache coherency */
1684 return retval;
1689 /* REVISIT this op is generic ARMv7-A/R stuff */
1690 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1692 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1694 retval = dpm->prepare(dpm);
1695 if (retval != ERROR_OK)
1696 return retval;
1698 /* The Cache handling will NOT work with MMU active, the
1699 * wrong addresses will be invalidated!
1701 * For both ICache and DCache, walk all cache lines in the
1702 * address range. The Cortex-A9 L1 caches have a fixed 32 byte line length.
1704 * REVISIT per ARMv7, these may trigger watchpoints ...
1707 /* invalidate I-Cache */
1708 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1710 /* ICIMVAU - Invalidate Cache single entry
1711 * with MVA to PoU
1712 * MCR p15, 0, r0, c7, c5, 1
1714 for (uint32_t cacheline = address;
1715 cacheline < address + size * count;
1716 cacheline += 32) {
1717 retval = dpm->instr_write_data_r0(dpm,
1718 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1719 cacheline);
1720 if (retval != ERROR_OK)
1721 return retval;
1725 /* invalidate D-Cache */
1726 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1728 /* DCIMVAC - Invalidate data Cache line
1729 * with MVA to PoC
1730 * MCR p15, 0, r0, c7, c6, 1
1732 for (uint32_t cacheline = address;
1733 cacheline < address + size * count;
1734 cacheline += 32) {
1735 retval = dpm->instr_write_data_r0(dpm,
1736 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1737 cacheline);
1738 if (retval != ERROR_OK)
1739 return retval;
1743 /* (void) */ dpm->finish(dpm);
1746 return retval;
1749 static int cortex_a9_write_memory(struct target *target, uint32_t address,
1750 uint32_t size, uint32_t count, uint8_t *buffer)
1752 int enabled = 0;
1753 uint32_t virt, phys;
1754 int retval;
1756 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1757 retval = cortex_a9_mmu(target, &enabled);
1758 if (retval != ERROR_OK)
1759 return retval;
1761 if (enabled)
1763 virt = address;
1764 retval = cortex_a9_virt2phys(target, virt, &phys);
1765 if (retval != ERROR_OK)
1766 return retval;
1767 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1768 address = phys;
1771 return cortex_a9_write_phys_memory(target, address, size,
1772 count, buffer);
1775 static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
1776 uint32_t count, uint8_t *buffer)
1778 return cortex_a9_write_memory(target, address, 4, count, buffer);
1781 static int cortex_a9_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1783 #if 0
1784 u16 dcrdr;
1786 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1787 *ctrl = (uint8_t)dcrdr;
1788 *value = (uint8_t)(dcrdr >> 8);
1790 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1792 /* write ack back to software dcc register
1793 * signify we have read data */
1794 if (dcrdr & (1 << 0))
1796 dcrdr = 0;
1797 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1799 #endif
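	/* NOTE: with the body compiled out above, *value and *ctrl are left at
	 * whatever the caller initialised them to, so no target requests are
	 * actually collected yet. */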
1800 return ERROR_OK;
1804 static int cortex_a9_handle_target_request(void *priv)
1806 struct target *target = priv;
1807 struct armv7a_common *armv7a = target_to_armv7a(target);
1808 struct adiv5_dap *swjdp = &armv7a->dap;
1809 int retval;
1811 if (!target_was_examined(target))
1812 return ERROR_OK;
1813 if (!target->dbg_msg_enabled)
1814 return ERROR_OK;
1816 if (target->state == TARGET_RUNNING)
1818 uint8_t data = 0;
1819 uint8_t ctrl = 0;
1821 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1822 if (retval != ERROR_OK)
1823 return retval;
1825 /* check if we have data */
1826 if (ctrl & (1 << 0))
1828 uint32_t request;
1830 /* we assume target is quick enough */
1831 request = data;
1832 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 request |= (data << 8);
1836 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1837 if (retval != ERROR_OK)
1838 return retval;
1839 request |= (data << 16);
1840 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1841 if (retval != ERROR_OK)
1842 return retval;
1843 request |= (data << 24);
1844 target_request(target, request);
1848 return ERROR_OK;
1852 * Cortex-A9 target information and configuration
1855 static int cortex_a9_examine_first(struct target *target)
1857 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1858 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1859 struct adiv5_dap *swjdp = &armv7a->dap;
1860 int i;
1861 int retval = ERROR_OK;
1862 uint32_t didr, ctypr, ttypr, cpuid;
1864 /* We do one extra read to ensure DAP is configured,
1865 * we call ahbap_debugport_init(swjdp) instead
1867 retval = ahbap_debugport_init(swjdp);
1868 if (retval != ERROR_OK)
1869 return retval;
1872 * FIXME: assuming omap4430
1874 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1875 * 0x80000000 is cpu0 coresight region
1877 if (target->coreid > 3) {
1878 LOG_ERROR("cortex_a9 supports up to 4 cores");
1879 return ERROR_INVALID_ARGUMENTS;
1881 armv7a->debug_base = 0x80000000 |
1882 ((target->coreid & 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT);
1884 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1885 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1886 if (retval != ERROR_OK)
1887 return retval;
1889 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1890 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1892 LOG_DEBUG("Examine %s failed", "CPUID");
1893 return retval;
1896 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1897 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1899 LOG_DEBUG("Examine %s failed", "CTYPR");
1900 return retval;
1903 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1904 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1906 LOG_DEBUG("Examine %s failed", "TTYPR");
1907 return retval;
1910 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1911 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1913 LOG_DEBUG("Examine %s failed", "DIDR");
1914 return retval;
1917 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1918 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1919 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1920 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1922 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1923 retval = cortex_a9_dpm_setup(cortex_a9, didr);
1924 if (retval != ERROR_OK)
1925 return retval;
1927 /* Setup Breakpoint Register Pairs */
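	/* DIDR[27:24] encodes the number of BRPs minus one and DIDR[23:20] the
	 * number of context-matching BRPs minus one, hence the "+ 1" below. */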
1928 cortex_a9->brp_num = ((didr >> 24) & 0x0F) + 1;
1929 cortex_a9->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1930 cortex_a9->brp_num_available = cortex_a9->brp_num;
1931 cortex_a9->brp_list = calloc(cortex_a9->brp_num, sizeof(struct cortex_a9_brp));
1932 // cortex_a9->brb_enabled = ????;
1933 for (i = 0; i < cortex_a9->brp_num; i++)
1935 cortex_a9->brp_list[i].used = 0;
1936 if (i < (cortex_a9->brp_num-cortex_a9->brp_num_context))
1937 cortex_a9->brp_list[i].type = BRP_NORMAL;
1938 else
1939 cortex_a9->brp_list[i].type = BRP_CONTEXT;
1940 cortex_a9->brp_list[i].value = 0;
1941 cortex_a9->brp_list[i].control = 0;
1942 cortex_a9->brp_list[i].BRPn = i;
1945 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9->brp_num);
1947 target_set_examined(target);
1948 return ERROR_OK;
1951 static int cortex_a9_examine(struct target *target)
1953 int retval = ERROR_OK;
1955 /* don't re-probe hardware after each reset */
1956 if (!target_was_examined(target))
1957 retval = cortex_a9_examine_first(target);
1959 /* Configure core debug access */
1960 if (retval == ERROR_OK)
1961 retval = cortex_a9_init_debug_access(target);
1963 return retval;
1967 * Cortex-A9 target creation and initialization
1970 static int cortex_a9_init_target(struct command_context *cmd_ctx,
1971 struct target *target)
1973 /* examine_first() does a bunch of this */
1974 return ERROR_OK;
1977 static int cortex_a9_init_arch_info(struct target *target,
1978 struct cortex_a9_common *cortex_a9, struct jtag_tap *tap)
1980 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1981 struct arm *armv4_5 = &armv7a->armv4_5_common;
1982 struct adiv5_dap *dap = &armv7a->dap;
1984 armv7a->armv4_5_common.dap = dap;
1986 /* Setup struct cortex_a9_common */
1987 cortex_a9->common_magic = CORTEX_A9_COMMON_MAGIC;
1988 armv4_5->arch_info = armv7a;
1990 /* prepare JTAG information for the new target */
1991 cortex_a9->jtag_info.tap = tap;
1992 cortex_a9->jtag_info.scann_size = 4;
1994 /* Leave (only) generic DAP stuff for debugport_init() */
1995 dap->jtag_info = &cortex_a9->jtag_info;
1996 dap->memaccess_tck = 80;
1998 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1999 dap->tar_autoincr_block = (1 << 10);
2001 cortex_a9->fast_reg_read = 0;
2003 /* Set default value */
2004 cortex_a9->current_address_mode = ARM_MODE_ANY;
2006 /* register arch-specific functions */
2007 armv7a->examine_debug_reason = NULL;
2009 armv7a->post_debug_entry = cortex_a9_post_debug_entry;
2011 armv7a->pre_restore_context = NULL;
2012 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2013 armv7a->armv4_5_mmu.get_ttb = cortex_a9_get_ttb;
2014 armv7a->armv4_5_mmu.read_memory = cortex_a9_read_phys_memory;
2015 armv7a->armv4_5_mmu.write_memory = cortex_a9_write_phys_memory;
2016 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a9_disable_mmu_caches;
2017 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a9_enable_mmu_caches;
2018 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2019 armv7a->armv4_5_mmu.mmu_enabled = 0;
2022 // arm7_9->handle_target_request = cortex_a9_handle_target_request;
2024 /* REVISIT v7a setup should be in a v7a-specific routine */
2025 arm_init_arch_info(target, armv4_5);
2026 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2028 target_register_timer_callback(cortex_a9_handle_target_request, 1, 1, target);
2030 return ERROR_OK;
2033 static int cortex_a9_target_create(struct target *target, Jim_Interp *interp)
2035 struct cortex_a9_common *cortex_a9 = calloc(1, sizeof(struct cortex_a9_common));
2037 return cortex_a9_init_arch_info(target, cortex_a9, target->tap);
static int cortex_a9_get_ttb(struct target *target, uint32_t *result)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	uint32_t ttb = 0;
	int retval = ERROR_OK;

	/* current_address_mode is set inside cortex_a9_virt2phys()
	   where we can determine if the address belongs to user or kernel space */
	if (cortex_a9->current_address_mode == ARM_MODE_SVC)
	{
		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (cortex_a9->current_address_mode == ARM_MODE_USR)
	{
		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* we don't know whose address this is: user or kernel.
	   We assume that if we are in kernel (SVC) mode the address belongs
	   to the kernel, and if we are in user mode it belongs to the user */
	else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
	{
		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
	{
		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* finally we don't know whose ttb to use: user or kernel */
	else
		LOG_ERROR("Don't know how to get ttb for the current mode");

	ttb &= 0xffffc000;

	*result = ttb;

	return ERROR_OK;
}
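/* Note on cortex_a9_get_ttb(): TTBR1 (c2,c0,1) is read for "kernel"
 * addresses and TTBR0 (c2,c0,0) for "user" addresses, matching the
 * Linux-style address split assumed by cortex_a9_virt2phys() below.
 * The 0xffffc000 mask keeps the translation table base bits [31:14],
 * i.e. it assumes a 16 KiB aligned first-level table (TTBCR.N == 0);
 * other TTBCR.N settings are not handled here.
 */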
static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
		cp15_control &= ~0x4U;

	if (i_cache)
		cp15_control &= ~0x1000U;

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}
static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control |= 0x1U;

	if (d_u_cache)
		cp15_control |= 0x4U;

	if (i_cache)
		cp15_control |= 0x1000U;

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}
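/* The bit masks used in cortex_a9_disable_mmu_caches() and
 * cortex_a9_enable_mmu_caches() correspond to CP15 System Control Register
 * (SCTLR) bits: bit 0 (M) enables the MMU, bit 2 (C) enables the
 * data/unified caches and bit 12 (I) enables the instruction cache.
 */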
static int cortex_a9_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_cortex_a9(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
	return ERROR_OK;
}
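/* cortex_a9_mmu() reports the cached armv4_5_mmu.mmu_enabled flag rather
 * than re-reading SCTLR over the DAP; the flag is initialised to 0 in
 * cortex_a9_init_arch_info() and is expected to be refreshed when the core
 * enters debug state (not shown in this part of the file).
 */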
static int cortex_a9_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys)
{
	uint32_t cb;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	// struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* We assume that the virtual address space is split
	   between user and kernel in the Linux style:
	   0x00000000-0xbfffffff - user space
	   0xc0000000-0xffffffff - kernel space */
	if (virt < 0xc0000000) /* Linux user space */
		cortex_a9->current_address_mode = ARM_MODE_USR;
	else /* Linux kernel */
		cortex_a9->current_address_mode = ARM_MODE_SVC;
	uint32_t ret;
	int retval = armv4_5_mmu_translate_va(target,
			&armv7a->armv4_5_mmu, virt, &cb, &ret);
	if (retval != ERROR_OK)
		return retval;
	/* Reset the flag. We don't want someone else to use it by mistake */
	cortex_a9->current_address_mode = ARM_MODE_ANY;

	*phys = ret;
	return ERROR_OK;
}
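/* Note on cortex_a9_virt2phys(): the ARM_MODE_USR/ARM_MODE_SVC hint stored
 * in current_address_mode only steers cortex_a9_get_ttb() towards TTBR0 or
 * TTBR1 for this single translation and is cleared again before returning.
 * Callers typically reach this through the target_type hook registered at
 * the bottom of this file, i.e. roughly
 *
 *   target->type->virt2phys(target, virt, &phys);
 */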
COMMAND_HANDLER(cortex_a9_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv4_5_handle_cache_info_command(CMD_CTX,
			&armv7a->armv4_5_mmu.armv4_5_cache);
}
COMMAND_HANDLER(cortex_a9_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target))
	{
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}
	return cortex_a9_init_debug_access(target);
}
static const struct command_registration cortex_a9_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a9_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a9_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration cortex_a9_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a9",
		.mode = COMMAND_ANY,
		.help = "Cortex-A9 command group",
		.chain = cortex_a9_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
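/* Illustrative only: with a cortex_a9 target configured and selected, the
 * handlers registered above are reachable from the OpenOCD command line
 * (e.g. the telnet prompt) as
 *
 *   cortex_a9 cache_info
 *   cortex_a9 dbginit
 */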
struct target_type cortexa9_target = {
	.name = "cortex_a9",

	.poll = cortex_a9_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a9_halt,
	.resume = cortex_a9_resume,
	.step = cortex_a9_step,

	.assert_reset = cortex_a9_assert_reset,
	.deassert_reset = cortex_a9_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a9_read_memory,
	.write_memory = cortex_a9_write_memory,
	.bulk_write_memory = cortex_a9_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a9_add_breakpoint,
	.remove_breakpoint = cortex_a9_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a9_command_handlers,
	.target_create = cortex_a9_target_create,
	.init_target = cortex_a9_init_target,
	.examine = cortex_a9_examine,

	.read_phys_memory = cortex_a9_read_phys_memory,
	.write_phys_memory = cortex_a9_write_phys_memory,
	.mmu = cortex_a9_mmu,
	.virt2phys = cortex_a9_virt2phys,