cortex_a9: check target halted on APB read/write memory
[openocd/jflash.git] / src / target / cortex_a9.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
47 static int cortex_a9_poll(struct target *target);
48 static int cortex_a9_debug_entry(struct target *target);
49 static int cortex_a9_restore_context(struct target *target, bool bpwp);
50 static int cortex_a9_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a9_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a9_mmu(struct target *target, int *enabled);
59 static int cortex_a9_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
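/* swjdp_memoryap is the AHB-AP used for direct bus/memory access, while
* swjdp_debugap is the APB-AP that reaches the CoreSight debug registers.
* These fixed AP numbers are an assumption inherited from the FIXME above.
*/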
78 * Cortex-A9 basic debug access; very low level, assumes state is saved
80 static int cortex_a9_init_debug_access(struct target *target)
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 uint8_t saved_apsel = dap_ap_get_select(swjdp);
86 int retval;
87 uint32_t dummy;
89 dap_ap_select(swjdp, swjdp_debugap);
91 LOG_DEBUG(" ");
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
96 if (retval != ERROR_OK)
98 /* try again */
99 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
100 if (retval == ERROR_OK)
102 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
105 if (retval != ERROR_OK)
106 goto out;
107 /* Clear Sticky Power Down status Bit in PRSR to enable access to
108 the registers in the Core Power Domain */
109 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
110 if (retval != ERROR_OK)
111 goto out;
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
115 /* Resync breakpoint registers */
117 /* Since this is likely called from init or reset, update target state information*/
118 retval = cortex_a9_poll(target);
120 out:
121 dap_ap_select(swjdp, saved_apsel);
122 return retval;
125 /* To reduce needless round-trips, pass in a pointer to the current
126 * DSCR value. Initialize it to zero if you just need to know the
127 * value on return from this function; or DSCR_INSTR_COMP if you
128 * happen to know that no instruction is pending.
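*
* A minimal usage sketch (illustrative only, not taken from this file):
*
*   uint32_t dscr = DSCR_INSTR_COMP;
*   retval = cortex_a9_exec_opcode(target, ARMV4_5_NOP, &dscr);
*
* On return, dscr caches the last DSCR value read while waiting, so it can
* be passed straight into the next call.
*/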
130 static int cortex_a9_exec_opcode(struct target *target,
131 uint32_t opcode, uint32_t *dscr_p)
133 uint32_t dscr;
134 int retval;
135 struct armv7a_common *armv7a = target_to_armv7a(target);
136 struct adiv5_dap *swjdp = &armv7a->dap;
138 dscr = dscr_p ? *dscr_p : 0;
140 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
142 /* Wait for InstrCompl bit to be set */
143 long long then = timeval_ms();
144 while ((dscr & DSCR_INSTR_COMP) == 0)
146 retval = mem_ap_read_atomic_u32(swjdp,
147 armv7a->debug_base + CPUDBG_DSCR, &dscr);
148 if (retval != ERROR_OK)
150 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
151 return retval;
153 if (timeval_ms() > then + 1000)
155 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
156 return ERROR_FAIL;
160 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
161 if (retval != ERROR_OK)
162 return retval;
164 then = timeval_ms();
167 retval = mem_ap_read_atomic_u32(swjdp,
168 armv7a->debug_base + CPUDBG_DSCR, &dscr);
169 if (retval != ERROR_OK)
171 LOG_ERROR("Could not read DSCR register");
172 return retval;
174 if (timeval_ms() > then + 1000)
176 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
177 return ERROR_FAIL;
180 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
182 if (dscr_p)
183 *dscr_p = dscr;
185 return retval;
188 /**************************************************************************
189 Read core registers with very few exec_opcode calls; fast, but needs a work area.
190 This can cause problems with the MMU active.
191 **************************************************************************/
192 static int cortex_a9_read_regs_through_mem(struct target *target, uint32_t address,
193 uint32_t * regfile)
195 int retval = ERROR_OK;
196 struct armv7a_common *armv7a = target_to_armv7a(target);
197 struct adiv5_dap *swjdp = &armv7a->dap;
199 retval = cortex_a9_dap_read_coreregister_u32(target, regfile, 0);
200 if (retval != ERROR_OK)
201 return retval;
202 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
203 if (retval != ERROR_OK)
204 return retval;
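/* Register list 0xFFFE selects r1..r15; STMIA stores them to the work
* area whose address was just loaded into r0. */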
205 retval = cortex_a9_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
206 if (retval != ERROR_OK)
207 return retval;
209 dap_ap_select(swjdp, swjdp_memoryap);
210 retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
211 if (retval != ERROR_OK)
212 return retval;
213 dap_ap_select(swjdp, swjdp_debugap);
215 return retval;
218 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
219 uint32_t *value, int regnum)
221 int retval = ERROR_OK;
222 uint8_t reg = regnum&0xFF;
223 uint32_t dscr = 0;
224 struct armv7a_common *armv7a = target_to_armv7a(target);
225 struct adiv5_dap *swjdp = &armv7a->dap;
227 if (reg > 17)
228 return retval;
230 if (reg < 15)
232 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
233 retval = cortex_a9_exec_opcode(target,
234 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
235 &dscr);
236 if (retval != ERROR_OK)
237 return retval;
239 else if (reg == 15)
241 /* "MOV r0, r15"; then move r0 to DCCTX */
242 retval = cortex_a9_exec_opcode(target, 0xE1A0000F, &dscr);
243 if (retval != ERROR_OK)
244 return retval;
245 retval = cortex_a9_exec_opcode(target,
246 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
247 &dscr);
248 if (retval != ERROR_OK)
249 return retval;
251 else
253 /* "MRS r0, CPSR" or "MRS r0, SPSR"
254 * then move r0 to DCCTX
256 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
257 if (retval != ERROR_OK)
258 return retval;
259 retval = cortex_a9_exec_opcode(target,
260 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
261 &dscr);
262 if (retval != ERROR_OK)
263 return retval;
266 /* Wait for DTRTXfull, then read DTRTX */
267 long long then = timeval_ms();
268 while ((dscr & DSCR_DTR_TX_FULL) == 0)
270 retval = mem_ap_read_atomic_u32(swjdp,
271 armv7a->debug_base + CPUDBG_DSCR, &dscr);
272 if (retval != ERROR_OK)
273 return retval;
274 if (timeval_ms() > then + 1000)
276 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
277 return ERROR_FAIL;
281 retval = mem_ap_read_atomic_u32(swjdp,
282 armv7a->debug_base + CPUDBG_DTRTX, value);
283 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
285 return retval;
288 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
289 uint32_t value, int regnum)
291 int retval = ERROR_OK;
292 uint8_t Rd = regnum&0xFF;
293 uint32_t dscr;
294 struct armv7a_common *armv7a = target_to_armv7a(target);
295 struct adiv5_dap *swjdp = &armv7a->dap;
297 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
299 /* Check that DCCRX is not full */
300 retval = mem_ap_read_atomic_u32(swjdp,
301 armv7a->debug_base + CPUDBG_DSCR, &dscr);
302 if (retval != ERROR_OK)
303 return retval;
304 if (dscr & DSCR_DTR_RX_FULL)
306 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
307 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
308 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
309 &dscr);
310 if (retval != ERROR_OK)
311 return retval;
314 if (Rd > 17)
315 return retval;
317 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
318 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
319 retval = mem_ap_write_u32(swjdp,
320 armv7a->debug_base + CPUDBG_DTRRX, value);
321 if (retval != ERROR_OK)
322 return retval;
324 if (Rd < 15)
326 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
327 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
328 &dscr);
329 if (retval != ERROR_OK)
330 return retval;
332 else if (Rd == 15)
334 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
335 * then "mov r15, r0"
337 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
338 &dscr);
339 if (retval != ERROR_OK)
340 return retval;
341 retval = cortex_a9_exec_opcode(target, 0xE1A0F000, &dscr);
342 if (retval != ERROR_OK)
343 return retval;
345 else
347 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
348 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
350 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
351 &dscr);
352 if (retval != ERROR_OK)
353 return retval;
354 retval = cortex_a9_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
355 &dscr);
356 if (retval != ERROR_OK)
357 return retval;
359 /* "Prefetch flush" after modifying execution status in CPSR */
360 if (Rd == 16)
362 retval = cortex_a9_exec_opcode(target,
363 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
364 &dscr);
365 if (retval != ERROR_OK)
366 return retval;
370 return retval;
373 /* Write to memory mapped registers directly with no cache or mmu handling */
374 static int cortex_a9_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
376 int retval;
377 struct armv7a_common *armv7a = target_to_armv7a(target);
378 struct adiv5_dap *swjdp = &armv7a->dap;
380 retval = mem_ap_write_atomic_u32(swjdp, address, value);
382 return retval;
386 * Cortex-A9 implementation of Debug Programmer's Model
388 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
389 * so there's no need to poll for it before executing an instruction.
391 * NOTE that in several of these cases the "stall" mode might be useful.
392 * It'd let us queue a few operations together... prepare/finish might
393 * be the places to enable/disable that mode.
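*
* A typical CP15 read through these hooks looks roughly like this (a
* sketch; the generic arm_dpm.c helpers are what actually drive them):
*
*   dpm->prepare(dpm);
*   dpm->instr_read_data_r0(dpm, ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &sctlr);
*   dpm->finish(dpm);
*/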
396 static inline struct cortex_a9_common *dpm_to_a9(struct arm_dpm *dpm)
398 return container_of(dpm, struct cortex_a9_common, armv7a_common.dpm);
401 static int cortex_a9_write_dcc(struct cortex_a9_common *a9, uint32_t data)
403 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
404 return mem_ap_write_u32(&a9->armv7a_common.dap,
405 a9->armv7a_common.debug_base + CPUDBG_DTRRX, data);
408 static int cortex_a9_read_dcc(struct cortex_a9_common *a9, uint32_t *data,
409 uint32_t *dscr_p)
411 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
412 uint32_t dscr = DSCR_INSTR_COMP;
413 int retval;
415 if (dscr_p)
416 dscr = *dscr_p;
418 /* Wait for DTRRXfull */
419 long long then = timeval_ms();
420 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
421 retval = mem_ap_read_atomic_u32(swjdp,
422 a9->armv7a_common.debug_base + CPUDBG_DSCR,
423 &dscr);
424 if (retval != ERROR_OK)
425 return retval;
426 if (timeval_ms() > then + 1000)
428 LOG_ERROR("Timeout waiting for read dcc");
429 return ERROR_FAIL;
433 retval = mem_ap_read_atomic_u32(swjdp,
434 a9->armv7a_common.debug_base + CPUDBG_DTRTX, data);
435 if (retval != ERROR_OK)
436 return retval;
437 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
439 if (dscr_p)
440 *dscr_p = dscr;
442 return retval;
445 static int cortex_a9_dpm_prepare(struct arm_dpm *dpm)
447 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
448 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
449 uint32_t dscr;
450 int retval;
452 /* set up invariant: INSTR_COMP is set after every DPM operation */
453 long long then = timeval_ms();
454 for (;;)
456 retval = mem_ap_read_atomic_u32(swjdp,
457 a9->armv7a_common.debug_base + CPUDBG_DSCR,
458 &dscr);
459 if (retval != ERROR_OK)
460 return retval;
461 if ((dscr & DSCR_INSTR_COMP) != 0)
462 break;
463 if (timeval_ms() > then + 1000)
465 LOG_ERROR("Timeout waiting for dpm prepare");
466 return ERROR_FAIL;
470 /* this "should never happen" ... */
471 if (dscr & DSCR_DTR_RX_FULL) {
472 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
473 /* Clear DCCRX */
474 retval = cortex_a9_exec_opcode(
475 a9->armv7a_common.armv4_5_common.target,
476 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
477 &dscr);
478 if (retval != ERROR_OK)
479 return retval;
482 return retval;
485 static int cortex_a9_dpm_finish(struct arm_dpm *dpm)
487 /* REVISIT what could be done here? */
488 return ERROR_OK;
491 static int cortex_a9_instr_write_data_dcc(struct arm_dpm *dpm,
492 uint32_t opcode, uint32_t data)
494 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
495 int retval;
496 uint32_t dscr = DSCR_INSTR_COMP;
498 retval = cortex_a9_write_dcc(a9, data);
499 if (retval != ERROR_OK)
500 return retval;
502 return cortex_a9_exec_opcode(
503 a9->armv7a_common.armv4_5_common.target,
504 opcode,
505 &dscr);
508 static int cortex_a9_instr_write_data_r0(struct arm_dpm *dpm,
509 uint32_t opcode, uint32_t data)
511 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
512 uint32_t dscr = DSCR_INSTR_COMP;
513 int retval;
515 retval = cortex_a9_write_dcc(a9, data);
516 if (retval != ERROR_OK)
517 return retval;
519 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
520 retval = cortex_a9_exec_opcode(
521 a9->armv7a_common.armv4_5_common.target,
522 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
523 &dscr);
524 if (retval != ERROR_OK)
525 return retval;
527 /* then the opcode, taking data from R0 */
528 retval = cortex_a9_exec_opcode(
529 a9->armv7a_common.armv4_5_common.target,
530 opcode,
531 &dscr);
533 return retval;
536 static int cortex_a9_instr_cpsr_sync(struct arm_dpm *dpm)
538 struct target *target = dpm->arm->target;
539 uint32_t dscr = DSCR_INSTR_COMP;
541 /* "Prefetch flush" after modifying execution status in CPSR */
542 return cortex_a9_exec_opcode(target,
543 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
544 &dscr);
547 static int cortex_a9_instr_read_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t *data)
550 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
554 /* the opcode, writing data to DCC */
555 retval = cortex_a9_exec_opcode(
556 a9->armv7a_common.armv4_5_common.target,
557 opcode,
558 &dscr);
559 if (retval != ERROR_OK)
560 return retval;
562 return cortex_a9_read_dcc(a9, data, &dscr);
566 static int cortex_a9_instr_read_data_r0(struct arm_dpm *dpm,
567 uint32_t opcode, uint32_t *data)
569 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
570 uint32_t dscr = DSCR_INSTR_COMP;
571 int retval;
573 /* the opcode, writing data to R0 */
574 retval = cortex_a9_exec_opcode(
575 a9->armv7a_common.armv4_5_common.target,
576 opcode,
577 &dscr);
578 if (retval != ERROR_OK)
579 return retval;
581 /* write R0 to DCC */
582 retval = cortex_a9_exec_opcode(
583 a9->armv7a_common.armv4_5_common.target,
584 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
585 &dscr);
586 if (retval != ERROR_OK)
587 return retval;
589 return cortex_a9_read_dcc(a9, data, &dscr);
592 static int cortex_a9_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
593 uint32_t addr, uint32_t control)
595 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
596 uint32_t vr = a9->armv7a_common.debug_base;
597 uint32_t cr = a9->armv7a_common.debug_base;
598 int retval;
600 switch (index_t) {
601 case 0 ... 15: /* breakpoints */
602 vr += CPUDBG_BVR_BASE;
603 cr += CPUDBG_BCR_BASE;
604 break;
605 case 16 ... 31: /* watchpoints */
606 vr += CPUDBG_WVR_BASE;
607 cr += CPUDBG_WCR_BASE;
608 index_t -= 16;
609 break;
610 default:
611 return ERROR_FAIL;
613 vr += 4 * index_t;
614 cr += 4 * index_t;
616 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
617 (unsigned) vr, (unsigned) cr);
619 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
620 vr, addr);
621 if (retval != ERROR_OK)
622 return retval;
623 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
624 cr, control);
625 return retval;
628 static int cortex_a9_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
630 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
631 uint32_t cr;
633 switch (index_t) {
634 case 0 ... 15:
635 cr = a9->armv7a_common.debug_base + CPUDBG_BCR_BASE;
636 break;
637 case 16 ... 31:
638 cr = a9->armv7a_common.debug_base + CPUDBG_WCR_BASE;
639 index_t -= 16;
640 break;
641 default:
642 return ERROR_FAIL;
644 cr += 4 * index_t;
646 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr);
648 /* clear control register */
649 return cortex_a9_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
652 static int cortex_a9_dpm_setup(struct cortex_a9_common *a9, uint32_t didr)
654 struct arm_dpm *dpm = &a9->armv7a_common.dpm;
655 int retval;
657 dpm->arm = &a9->armv7a_common.armv4_5_common;
658 dpm->didr = didr;
660 dpm->prepare = cortex_a9_dpm_prepare;
661 dpm->finish = cortex_a9_dpm_finish;
663 dpm->instr_write_data_dcc = cortex_a9_instr_write_data_dcc;
664 dpm->instr_write_data_r0 = cortex_a9_instr_write_data_r0;
665 dpm->instr_cpsr_sync = cortex_a9_instr_cpsr_sync;
667 dpm->instr_read_data_dcc = cortex_a9_instr_read_data_dcc;
668 dpm->instr_read_data_r0 = cortex_a9_instr_read_data_r0;
670 dpm->bpwp_enable = cortex_a9_bpwp_enable;
671 dpm->bpwp_disable = cortex_a9_bpwp_disable;
673 retval = arm_dpm_setup(dpm);
674 if (retval == ERROR_OK)
675 retval = arm_dpm_initialize(dpm);
677 return retval;
682 * Cortex-A9 Run control
685 static int cortex_a9_poll(struct target *target)
687 int retval = ERROR_OK;
688 uint32_t dscr;
689 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
690 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
691 struct adiv5_dap *swjdp = &armv7a->dap;
692 enum target_state prev_target_state = target->state;
693 uint8_t saved_apsel = dap_ap_get_select(swjdp);
695 dap_ap_select(swjdp, swjdp_debugap);
696 retval = mem_ap_read_atomic_u32(swjdp,
697 armv7a->debug_base + CPUDBG_DSCR, &dscr);
698 if (retval != ERROR_OK)
700 dap_ap_select(swjdp, saved_apsel);
701 return retval;
703 cortex_a9->cpudbg_dscr = dscr;
705 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
707 if (prev_target_state != TARGET_HALTED)
709 /* We have a halting debug event */
710 LOG_DEBUG("Target halted");
711 target->state = TARGET_HALTED;
712 if ((prev_target_state == TARGET_RUNNING)
713 || (prev_target_state == TARGET_RESET))
715 retval = cortex_a9_debug_entry(target);
716 if (retval != ERROR_OK)
717 return retval;
719 target_call_event_callbacks(target,
720 TARGET_EVENT_HALTED);
722 if (prev_target_state == TARGET_DEBUG_RUNNING)
724 LOG_DEBUG(" ");
726 retval = cortex_a9_debug_entry(target);
727 if (retval != ERROR_OK)
728 return retval;
730 target_call_event_callbacks(target,
731 TARGET_EVENT_DEBUG_HALTED);
735 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
737 target->state = TARGET_RUNNING;
739 else
741 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
742 target->state = TARGET_UNKNOWN;
745 dap_ap_select(swjdp, saved_apsel);
747 return retval;
750 static int cortex_a9_halt(struct target *target)
752 int retval = ERROR_OK;
753 uint32_t dscr;
754 struct armv7a_common *armv7a = target_to_armv7a(target);
755 struct adiv5_dap *swjdp = &armv7a->dap;
756 uint8_t saved_apsel = dap_ap_get_select(swjdp);
757 dap_ap_select(swjdp, swjdp_debugap);
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
763 retval = mem_ap_write_atomic_u32(swjdp,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 goto out;
769 * enter halting debug mode
771 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 goto out;
775 retval = mem_ap_write_atomic_u32(swjdp,
776 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
777 if (retval != ERROR_OK)
778 goto out;
780 long long then = timeval_ms();
781 for (;;)
783 retval = mem_ap_read_atomic_u32(swjdp,
784 armv7a->debug_base + CPUDBG_DSCR, &dscr);
785 if (retval != ERROR_OK)
786 goto out;
787 if ((dscr & DSCR_CORE_HALTED) != 0)
789 break;
791 if (timeval_ms() > then + 1000)
793 LOG_ERROR("Timeout waiting for halt");
794 return ERROR_FAIL;
798 target->debug_reason = DBG_REASON_DBGRQ;
800 out:
801 dap_ap_select(swjdp, saved_apsel);
802 return retval;
805 static int cortex_a9_resume(struct target *target, int current,
806 uint32_t address, int handle_breakpoints, int debug_execution)
808 struct armv7a_common *armv7a = target_to_armv7a(target);
809 struct arm *armv4_5 = &armv7a->armv4_5_common;
810 struct adiv5_dap *swjdp = &armv7a->dap;
811 int retval;
813 // struct breakpoint *breakpoint = NULL;
814 uint32_t resume_pc, dscr;
816 uint8_t saved_apsel = dap_ap_get_select(swjdp);
817 dap_ap_select(swjdp, swjdp_debugap);
819 if (!debug_execution)
820 target_free_all_working_areas(target);
822 #if 0
823 if (debug_execution)
825 /* Disable interrupts */
826 /* We disable interrupts in the PRIMASK register instead of
827 * masking with C_MASKINTS,
828 * This is probably the same issue as Cortex-M3 Errata 377493:
829 * C_MASKINTS in parallel with disabled interrupts can cause
830 * local faults to not be taken. */
831 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
832 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
833 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
835 /* Make sure we are in Thumb mode */
836 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
837 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
838 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
839 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
841 #endif
843 /* current = 1: continue on current pc, otherwise continue at <address> */
844 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
845 if (!current)
846 resume_pc = address;
848 /* Make sure that the ARMv7 gdb thumb fixups do not
849 * kill the return address
851 switch (armv4_5->core_state)
853 case ARM_STATE_ARM:
854 resume_pc &= 0xFFFFFFFC;
855 break;
856 case ARM_STATE_THUMB:
857 case ARM_STATE_THUMB_EE:
858 /* When the return address is loaded into PC
859 * bit 0 must be 1 to stay in Thumb state
861 resume_pc |= 0x1;
862 break;
863 case ARM_STATE_JAZELLE:
864 LOG_ERROR("How do I resume into Jazelle state??");
865 return ERROR_FAIL;
867 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
868 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
869 armv4_5->pc->dirty = 1;
870 armv4_5->pc->valid = 1;
872 retval = cortex_a9_restore_context(target, handle_breakpoints);
873 if (retval != ERROR_OK)
874 return retval;
876 #if 0
877 /* the front-end may request us not to handle breakpoints */
878 if (handle_breakpoints)
880 /* Single step past breakpoint at current address */
881 if ((breakpoint = breakpoint_find(target, resume_pc)))
883 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
884 cortex_m3_unset_breakpoint(target, breakpoint);
885 cortex_m3_single_step_core(target);
886 cortex_m3_set_breakpoint(target, breakpoint);
890 #endif
893 * Restart core and wait for it to be started. Clear ITRen and sticky
894 * exception flags: see ARMv7 ARM, C5.9.
896 * REVISIT: for single stepping, we probably want to
897 * disable IRQs by default, with optional override...
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
905 if ((dscr & DSCR_INSTR_COMP) == 0)
906 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
908 retval = mem_ap_write_atomic_u32(swjdp,
909 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
910 if (retval != ERROR_OK)
911 return retval;
913 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR,
914 DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
918 long long then = timeval_ms();
919 for (;;)
921 retval = mem_ap_read_atomic_u32(swjdp,
922 armv7a->debug_base + CPUDBG_DSCR, &dscr);
923 if (retval != ERROR_OK)
924 return retval;
925 if ((dscr & DSCR_CORE_RESTARTED) != 0)
926 break;
927 if (timeval_ms() > then + 1000)
929 LOG_ERROR("Timeout waiting for resume");
930 return ERROR_FAIL;
934 target->debug_reason = DBG_REASON_NOTHALTED;
935 target->state = TARGET_RUNNING;
937 /* registers are now invalid */
938 register_cache_invalidate(armv4_5->core_cache);
940 if (!debug_execution)
942 target->state = TARGET_RUNNING;
943 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
944 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
946 else
948 target->state = TARGET_DEBUG_RUNNING;
949 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
950 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
953 dap_ap_select(swjdp, saved_apsel);
955 return ERROR_OK;
958 static int cortex_a9_debug_entry(struct target *target)
960 int i;
961 uint32_t regfile[16], cpsr, dscr;
962 int retval = ERROR_OK;
963 struct working_area *regfile_working_area = NULL;
964 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
965 struct armv7a_common *armv7a = target_to_armv7a(target);
966 struct arm *armv4_5 = &armv7a->armv4_5_common;
967 struct adiv5_dap *swjdp = &armv7a->dap;
968 struct reg *reg;
970 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a9->cpudbg_dscr);
972 /* REVISIT surely we should not re-read DSCR !! */
973 retval = mem_ap_read_atomic_u32(swjdp,
974 armv7a->debug_base + CPUDBG_DSCR, &dscr);
975 if (retval != ERROR_OK)
976 return retval;
978 /* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
979 * imprecise data aborts get discarded by issuing a Data
980 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
983 /* Enable the ITR execution once we are in debug mode */
984 dscr |= DSCR_ITR_EN;
985 retval = mem_ap_write_atomic_u32(swjdp,
986 armv7a->debug_base + CPUDBG_DSCR, dscr);
987 if (retval != ERROR_OK)
988 return retval;
990 /* Examine debug reason */
991 arm_dpm_report_dscr(&armv7a->dpm, cortex_a9->cpudbg_dscr);
993 /* save address of instruction that triggered the watchpoint? */
994 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
995 uint32_t wfar;
997 retval = mem_ap_read_atomic_u32(swjdp,
998 armv7a->debug_base + CPUDBG_WFAR,
999 &wfar);
1000 if (retval != ERROR_OK)
1001 return retval;
1002 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1005 /* REVISIT fast_reg_read is never set ... */
1007 /* Examine target state and mode */
1008 if (cortex_a9->fast_reg_read)
1009 target_alloc_working_area(target, 64, &regfile_working_area);
1011 /* First load registers accessible through the core debug port */
1012 if (!regfile_working_area)
1014 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1016 else
1018 dap_ap_select(swjdp, swjdp_memoryap);
1019 retval = cortex_a9_read_regs_through_mem(target,
1020 regfile_working_area->address, regfile);
1021 dap_ap_select(swjdp, swjdp_memoryap);
1022 target_free_working_area(target, regfile_working_area);
1023 if (retval != ERROR_OK)
1025 return retval;
1028 /* read Current PSR */
1029 retval = cortex_a9_dap_read_coreregister_u32(target, &cpsr, 16);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 dap_ap_select(swjdp, swjdp_debugap);
1033 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1035 arm_set_cpsr(armv4_5, cpsr);
1037 /* update cache */
1038 for (i = 0; i <= ARM_PC; i++)
1040 reg = arm_reg_current(armv4_5, i);
1042 buf_set_u32(reg->value, 0, 32, regfile[i]);
1043 reg->valid = 1;
1044 reg->dirty = 0;
1047 /* Fixup PC Resume Address */
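/* The PC captured in regfile includes the usual pipeline offset (8 bytes
* in ARM state, 4 in Thumb/ThumbEE), so back it up to the real resume
* address. */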
1048 if (cpsr & (1 << 5))
1050 // T bit set for Thumb or ThumbEE state
1051 regfile[ARM_PC] -= 4;
1053 else
1055 // ARM state
1056 regfile[ARM_PC] -= 8;
1059 reg = armv4_5->pc;
1060 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1061 reg->dirty = reg->valid;
1064 #if 0
1065 /* TODO, Move this */
1066 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1067 cortex_a9_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1068 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1070 cortex_a9_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1071 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1073 cortex_a9_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1074 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1075 #endif
1077 /* Are we in an exception handler */
1078 // armv4_5->exception_number = 0;
1079 if (armv7a->post_debug_entry)
1081 retval = armv7a->post_debug_entry(target);
1082 if (retval != ERROR_OK)
1083 return retval;
1086 return retval;
1089 static int cortex_a9_post_debug_entry(struct target *target)
1091 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1092 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1093 int retval;
1095 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1096 retval = armv7a->armv4_5_common.mrc(target, 15,
1097 0, 0, /* op1, op2 */
1098 1, 0, /* CRn, CRm */
1099 &cortex_a9->cp15_control_reg);
1100 if (retval != ERROR_OK)
1101 return retval;
1102 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a9->cp15_control_reg);
1104 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1106 uint32_t cache_type_reg;
1108 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1109 retval = armv7a->armv4_5_common.mrc(target, 15,
1110 0, 1, /* op1, op2 */
1111 0, 0, /* CRn, CRm */
1112 &cache_type_reg);
1113 if (retval != ERROR_OK)
1114 return retval;
1115 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1117 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A9 */
1118 armv4_5_identify_cache(cache_type_reg,
1119 &armv7a->armv4_5_mmu.armv4_5_cache);
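/* Decode SCTLR: bit 0 (M) = MMU enable, bit 2 (C) = data/unified cache
* enable, bit 12 (I) = instruction cache enable. */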
1122 armv7a->armv4_5_mmu.mmu_enabled =
1123 (cortex_a9->cp15_control_reg & 0x1U) ? 1 : 0;
1124 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1125 (cortex_a9->cp15_control_reg & 0x4U) ? 1 : 0;
1126 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1127 (cortex_a9->cp15_control_reg & 0x1000U) ? 1 : 0;
1129 return ERROR_OK;
1132 static int cortex_a9_step(struct target *target, int current, uint32_t address,
1133 int handle_breakpoints)
1135 struct armv7a_common *armv7a = target_to_armv7a(target);
1136 struct arm *armv4_5 = &armv7a->armv4_5_common;
1137 struct adiv5_dap *swjdp = &armv7a->dap;
1138 struct breakpoint *breakpoint = NULL;
1139 struct breakpoint stepbreakpoint;
1140 struct reg *r;
1141 int retval;
1142 uint8_t saved_apsel = dap_ap_get_select(swjdp);
1144 if (target->state != TARGET_HALTED)
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED;
1150 dap_ap_select(swjdp, swjdp_debugap);
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1153 r = armv4_5->pc;
1154 if (!current)
1156 buf_set_u32(r->value, 0, 32, address);
1158 else
1160 address = buf_get_u32(r->value, 0, 32);
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A9 uses a breakpoint for single step,
1165 * we MUST handle breakpoints.
1167 handle_breakpoints = 1;
1168 if (handle_breakpoints) {
1169 breakpoint = breakpoint_find(target, address);
1170 if (breakpoint)
1171 cortex_a9_unset_breakpoint(target, breakpoint);
1174 /* Setup single step breakpoint */
1175 stepbreakpoint.address = address;
1176 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1177 ? 2 : 4;
1178 stepbreakpoint.type = BKPT_HARD;
1179 stepbreakpoint.set = 0;
1181 /* Break on IVA mismatch */
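/* matchmode 0x04 requests an address-mismatch breakpoint: the core halts
* on the first instruction fetched from any address other than the one
* being stepped, which is how single step is implemented here. */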
1182 cortex_a9_set_breakpoint(target, &stepbreakpoint, 0x04);
1184 target->debug_reason = DBG_REASON_SINGLESTEP;
1186 retval = cortex_a9_resume(target, 1, address, 0, 0);
1187 if (retval != ERROR_OK)
1188 goto out;
1190 long long then = timeval_ms();
1191 while (target->state != TARGET_HALTED)
1193 retval = cortex_a9_poll(target);
1194 if (retval != ERROR_OK)
1195 goto out;
1196 if (timeval_ms() > then + 1000)
1198 LOG_ERROR("timeout waiting for target halt");
1199 retval = ERROR_FAIL;
1200 goto out;
1204 cortex_a9_unset_breakpoint(target, &stepbreakpoint);
1206 target->debug_reason = DBG_REASON_BREAKPOINT;
1208 if (breakpoint)
1209 cortex_a9_set_breakpoint(target, breakpoint, 0);
1211 if (target->state != TARGET_HALTED)
1212 LOG_DEBUG("target stepped");
1214 retval = ERROR_OK;
1216 out:
1217 dap_ap_select(swjdp, saved_apsel);
1218 return retval;
1221 static int cortex_a9_restore_context(struct target *target, bool bpwp)
1223 struct armv7a_common *armv7a = target_to_armv7a(target);
1225 LOG_DEBUG(" ");
1227 if (armv7a->pre_restore_context)
1228 armv7a->pre_restore_context(target);
1230 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1235 * Cortex-A9 Breakpoint and watchpoint functions
1238 /* Setup hardware Breakpoint Register Pair */
1239 static int cortex_a9_set_breakpoint(struct target *target,
1240 struct breakpoint *breakpoint, uint8_t matchmode)
1242 int retval;
1243 int brp_i=0;
1244 uint32_t control;
1245 uint8_t byte_addr_select = 0x0F;
1246 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1247 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1248 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1250 if (breakpoint->set)
1252 LOG_WARNING("breakpoint already set");
1253 return ERROR_OK;
1256 if (breakpoint->type == BKPT_HARD)
1258 while ((brp_i < cortex_a9->brp_num) && brp_list[brp_i].used)
1259 brp_i++;
1260 if (brp_i >= cortex_a9->brp_num)
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1265 breakpoint->set = brp_i + 1;
1266 if (breakpoint->length == 2)
1268 byte_addr_select = (3 << (breakpoint->address & 0x02));
1270 control = ((matchmode & 0x7) << 20)
1271 | (byte_addr_select << 5)
1272 | (3 << 1) | 1;
1273 brp_list[brp_i].used = 1;
1274 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1275 brp_list[brp_i].control = control;
1276 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1277 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1278 brp_list[brp_i].value);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1282 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1283 brp_list[brp_i].control);
1284 if (retval != ERROR_OK)
1285 return retval;
1286 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1287 brp_list[brp_i].control,
1288 brp_list[brp_i].value);
1290 else if (breakpoint->type == BKPT_SOFT)
1292 uint8_t code[4];
1293 if (breakpoint->length == 2)
1295 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1297 else
1299 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1301 retval = target->type->read_memory(target,
1302 breakpoint->address & 0xFFFFFFFE,
1303 breakpoint->length, 1,
1304 breakpoint->orig_instr);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = target->type->write_memory(target,
1308 breakpoint->address & 0xFFFFFFFE,
1309 breakpoint->length, 1, code);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 breakpoint->set = 0x11; /* Any nice value but 0 */
1315 return ERROR_OK;
1318 static int cortex_a9_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1320 int retval;
1321 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1322 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1323 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1325 if (!breakpoint->set)
1327 LOG_WARNING("breakpoint not set");
1328 return ERROR_OK;
1331 if (breakpoint->type == BKPT_HARD)
1333 int brp_i = breakpoint->set - 1;
1334 if ((brp_i < 0) || (brp_i >= cortex_a9->brp_num))
1336 LOG_DEBUG("Invalid BRP number in breakpoint");
1337 return ERROR_OK;
1339 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1340 brp_list[brp_i].control, brp_list[brp_i].value);
1341 brp_list[brp_i].used = 0;
1342 brp_list[brp_i].value = 0;
1343 brp_list[brp_i].control = 0;
1344 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1345 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1346 brp_list[brp_i].control);
1347 if (retval != ERROR_OK)
1348 return retval;
1349 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1350 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1351 brp_list[brp_i].value);
1352 if (retval != ERROR_OK)
1353 return retval;
1355 else
1357 /* restore original instruction (kept in target endianness) */
1358 if (breakpoint->length == 4)
1360 retval = target->type->write_memory(target,
1361 breakpoint->address & 0xFFFFFFFE,
1362 4, 1, breakpoint->orig_instr);
1363 if (retval != ERROR_OK)
1364 return retval;
1366 else
1368 retval = target->type->write_memory(target,
1369 breakpoint->address & 0xFFFFFFFE,
1370 2, 1, breakpoint->orig_instr);
1371 if (retval != ERROR_OK)
1372 return retval;
1375 breakpoint->set = 0;
1377 return ERROR_OK;
1380 static int cortex_a9_add_breakpoint(struct target *target,
1381 struct breakpoint *breakpoint)
1383 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1385 if ((breakpoint->type == BKPT_HARD) && (cortex_a9->brp_num_available < 1))
1387 LOG_INFO("no hardware breakpoint available");
1388 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1391 if (breakpoint->type == BKPT_HARD)
1392 cortex_a9->brp_num_available--;
1394 return cortex_a9_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1397 static int cortex_a9_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1399 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1401 #if 0
1402 /* It is perfectly possible to remove breakpoints while the target is running */
1403 if (target->state != TARGET_HALTED)
1405 LOG_WARNING("target not halted");
1406 return ERROR_TARGET_NOT_HALTED;
1408 #endif
1410 if (breakpoint->set)
1412 cortex_a9_unset_breakpoint(target, breakpoint);
1413 if (breakpoint->type == BKPT_HARD)
1414 cortex_a9->brp_num_available++ ;
1418 return ERROR_OK;
1424 * Cortex-A9 Reset functions
1427 static int cortex_a9_assert_reset(struct target *target)
1429 struct armv7a_common *armv7a = target_to_armv7a(target);
1431 LOG_DEBUG(" ");
1433 /* FIXME when halt is requested, make it work somehow... */
1435 /* Issue some kind of warm reset. */
1436 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1437 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1438 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1439 /* REVISIT handle "pulls" cases, if there's
1440 * hardware that needs them to work.
1442 jtag_add_reset(0, 1);
1443 } else {
1444 LOG_ERROR("%s: how to reset?", target_name(target));
1445 return ERROR_FAIL;
1448 /* registers are now invalid */
1449 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1451 target->state = TARGET_RESET;
1453 return ERROR_OK;
1456 static int cortex_a9_deassert_reset(struct target *target)
1458 int retval;
1460 LOG_DEBUG(" ");
1462 /* be certain SRST is off */
1463 jtag_add_reset(0, 0);
1465 retval = cortex_a9_poll(target);
1466 if (retval != ERROR_OK)
1467 return retval;
1469 if (target->reset_halt) {
1470 if (target->state != TARGET_HALTED) {
1471 LOG_WARNING("%s: ran after reset and before halt ...",
1472 target_name(target));
1473 if ((retval = target_halt(target)) != ERROR_OK)
1474 return retval;
1478 return ERROR_OK;
1482 * Cortex-A9 Memory access
1484 * This is the same as for Cortex-M3, but we must also use the correct
1485 * AP number for every access.
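*
* Two paths are used below: on swjdp_memoryap the access goes straight
* through the AHB-AP; on any other AP we fall back to halted-mode
* LDRB/STRB through the core (the APB route), which is slower but stays
* coherent with the caches.
*/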
1488 static int cortex_a9_read_phys_memory(struct target *target,
1489 uint32_t address, uint32_t size,
1490 uint32_t count, uint8_t *buffer)
1492 struct armv7a_common *armv7a = target_to_armv7a(target);
1493 struct adiv5_dap *swjdp = &armv7a->dap;
1494 int retval = ERROR_INVALID_ARGUMENTS;
1495 uint8_t apsel = dap_ap_get_select(swjdp);
1497 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1499 if (count && buffer) {
1501 if ( apsel == swjdp_memoryap ) {
1503 /* read memory through AHB-AP */
1505 switch (size) {
1506 case 4:
1507 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1508 break;
1509 case 2:
1510 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1511 break;
1512 case 1:
1513 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1514 break;
1517 } else {
1519 /* read memory through APB-AP */
1521 uint32_t saved_r0, saved_r1;
1522 int nbytes = count * size;
1523 uint32_t data;
1525 if (target->state != TARGET_HALTED)
1527 LOG_WARNING("target not halted");
1528 return ERROR_TARGET_NOT_HALTED;
1531 /* save registers r0 and r1, we are going to corrupt them */
1532 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
1533 if (retval != ERROR_OK)
1534 return retval;
1536 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
1537 if (retval != ERROR_OK)
1538 return retval;
1540 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
1541 if (retval != ERROR_OK)
1542 return retval;
1544 while (nbytes > 0) {
1546 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1547 retval = cortex_a9_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
1548 if (retval != ERROR_OK)
1549 return retval;
1551 retval = cortex_a9_dap_read_coreregister_u32(target, &data, 1);
1552 if (retval != ERROR_OK)
1553 return retval;
1555 *buffer++ = data;
1556 --nbytes;
1560 /* restore corrupted registers r0 and r1 */
1561 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
1562 if (retval != ERROR_OK)
1563 return retval;
1565 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
1566 if (retval != ERROR_OK)
1567 return retval;
1572 return retval;
1575 static int cortex_a9_read_memory(struct target *target, uint32_t address,
1576 uint32_t size, uint32_t count, uint8_t *buffer)
1578 int enabled = 0;
1579 uint32_t virt, phys;
1580 int retval;
1582 /* cortex_a9 handles unaligned memory access */
1584 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1585 retval = cortex_a9_mmu(target, &enabled);
1586 if (retval != ERROR_OK)
1587 return retval;
1589 if (enabled)
1591 virt = address;
1592 retval = cortex_a9_virt2phys(target, virt, &phys);
1593 if (retval != ERROR_OK)
1594 return retval;
1596 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1597 address = phys;
1600 return cortex_a9_read_phys_memory(target, address, size, count, buffer);
1603 static int cortex_a9_write_phys_memory(struct target *target,
1604 uint32_t address, uint32_t size,
1605 uint32_t count, uint8_t *buffer)
1607 struct armv7a_common *armv7a = target_to_armv7a(target);
1608 struct adiv5_dap *swjdp = &armv7a->dap;
1609 int retval = ERROR_INVALID_ARGUMENTS;
1611 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1613 if (count && buffer) {
1614 uint8_t apsel = dap_ap_get_select(swjdp);
1616 if ( apsel == swjdp_memoryap ) {
1618 /* write memory through AHB-AP */
1619 switch (size) {
1620 case 4:
1621 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1622 break;
1623 case 2:
1624 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1625 break;
1626 case 1:
1627 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1628 break;
1631 } else {
1633 /* write memory through APB-AP */
1635 uint32_t saved_r0, saved_r1;
1636 int nbytes = count * size;
1637 uint32_t data;
1639 if (target->state != TARGET_HALTED)
1641 LOG_WARNING("target not halted");
1642 return ERROR_TARGET_NOT_HALTED;
1645 /* save registers r0 and r1, we are going to corrupt them */
1646 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
1647 if (retval != ERROR_OK)
1648 return retval;
1650 retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
1651 if (retval != ERROR_OK)
1652 return retval;
1654 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
1655 if (retval != ERROR_OK)
1656 return retval;
1658 while (nbytes > 0) {
1660 data = *buffer++;
1662 retval = cortex_a9_dap_write_coreregister_u32(target, data, 1);
1663 if (retval != ERROR_OK)
1664 return retval;
1666 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1667 retval = cortex_a9_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
1668 if (retval != ERROR_OK)
1669 return retval;
1671 --nbytes;
1674 /* restore corrupted registers r0 and r1 */
1675 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
1676 if (retval != ERROR_OK)
1677 return retval;
1679 retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
1680 if (retval != ERROR_OK)
1681 return retval;
1683 /* we can return here without invalidating D/I-cache because */
1684 /* access through APB maintains cache coherency */
1685 return retval;
1690 /* REVISIT this op is generic ARMv7-A/R stuff */
1691 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1693 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1695 retval = dpm->prepare(dpm);
1696 if (retval != ERROR_OK)
1697 return retval;
1699 /* The cache handling will NOT work with the MMU active: the
1700 * wrong addresses would be invalidated!
1702 * For both ICache and DCache, walk all cache lines in the
1703 * address range. Cortex-A9 has fixed 64 byte line length.
1705 * REVISIT per ARMv7, these may trigger watchpoints ...
1708 /* invalidate I-Cache */
1709 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1711 /* ICIMVAU - Invalidate Cache single entry
1712 * with MVA to PoU
1713 * MCR p15, 0, r0, c7, c5, 1
1715 for (uint32_t cacheline = address;
1716 cacheline < address + size * count;
1717 cacheline += 64) {
1718 retval = dpm->instr_write_data_r0(dpm,
1719 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1720 cacheline);
1721 if (retval != ERROR_OK)
1722 return retval;
1726 /* invalidate D-Cache */
1727 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1729 /* DCIMVAC - Invalidate data Cache line
1730 * with MVA to PoC
1731 * MCR p15, 0, r0, c7, c6, 1
1733 for (uint32_t cacheline = address;
1734 cacheline < address + size * count;
1735 cacheline += 64) {
1736 retval = dpm->instr_write_data_r0(dpm,
1737 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1738 cacheline);
1739 if (retval != ERROR_OK)
1740 return retval;
1744 /* (void) */ dpm->finish(dpm);
1747 return retval;
1750 static int cortex_a9_write_memory(struct target *target, uint32_t address,
1751 uint32_t size, uint32_t count, uint8_t *buffer)
1753 int enabled = 0;
1754 uint32_t virt, phys;
1755 int retval;
1757 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1758 retval = cortex_a9_mmu(target, &enabled);
1759 if (retval != ERROR_OK)
1760 return retval;
1762 if (enabled)
1764 virt = address;
1765 retval = cortex_a9_virt2phys(target, virt, &phys);
1766 if (retval != ERROR_OK)
1767 return retval;
1768 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1769 address = phys;
1772 return cortex_a9_write_phys_memory(target, address, size,
1773 count, buffer);
1776 static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
1777 uint32_t count, uint8_t *buffer)
1779 return cortex_a9_write_memory(target, address, 4, count, buffer);
1782 static int cortex_a9_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1784 #if 0
1785 u16 dcrdr;
1787 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1788 *ctrl = (uint8_t)dcrdr;
1789 *value = (uint8_t)(dcrdr >> 8);
1791 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1793 /* write ack back to software dcc register
1794 * signify we have read data */
1795 if (dcrdr & (1 << 0))
1797 dcrdr = 0;
1798 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1800 #endif
1801 return ERROR_OK;
1805 static int cortex_a9_handle_target_request(void *priv)
1807 struct target *target = priv;
1808 struct armv7a_common *armv7a = target_to_armv7a(target);
1809 struct adiv5_dap *swjdp = &armv7a->dap;
1810 int retval;
1812 if (!target_was_examined(target))
1813 return ERROR_OK;
1814 if (!target->dbg_msg_enabled)
1815 return ERROR_OK;
1817 if (target->state == TARGET_RUNNING)
1819 uint8_t data = 0;
1820 uint8_t ctrl = 0;
1822 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1823 if (retval != ERROR_OK)
1824 return retval;
1826 /* check if we have data */
1827 if (ctrl & (1 << 0))
1829 uint32_t request;
1831 /* we assume target is quick enough */
1832 request = data;
1833 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1834 if (retval != ERROR_OK)
1835 return retval;
1836 request |= (data << 8);
1837 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 request |= (data << 16);
1841 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 request |= (data << 24);
1845 target_request(target, request);
1849 return ERROR_OK;
1853 * Cortex-A9 target information and configuration
1856 static int cortex_a9_examine_first(struct target *target)
1858 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1859 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1860 struct adiv5_dap *swjdp = &armv7a->dap;
1861 int i;
1862 int retval = ERROR_OK;
1863 uint32_t didr, ctypr, ttypr, cpuid;
1865 /* We do one extra read to ensure the DAP is configured;
1866 * we call ahbap_debugport_init(swjdp) for that instead
1868 retval = ahbap_debugport_init(swjdp);
1869 if (retval != ERROR_OK)
1870 return retval;
1872 dap_ap_select(swjdp, swjdp_debugap);
1875 * FIXME: assuming omap4430
1877 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1878 * 0x80000000 is cpu0 coresight region
1880 if (target->coreid > 3) {
1881 LOG_ERROR("cortex_a9 supports up to 4 cores");
1882 return ERROR_INVALID_ARGUMENTS;
1884 armv7a->debug_base = 0x80000000 |
1885 ((target->coreid & 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT);
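/* Each core's CoreSight region is offset from 0x80000000 by
* (coreid << CORTEX_A9_PADDRDBG_CPU_SHIFT); as the FIXME notes, this
* layout is an OMAP4430 assumption. */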
1887 retval = mem_ap_read_atomic_u32(swjdp,
1888 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1889 if (retval != ERROR_OK)
1890 return retval;
1892 if ((retval = mem_ap_read_atomic_u32(swjdp,
1893 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1895 LOG_DEBUG("Examine %s failed", "CPUID");
1896 return retval;
1899 if ((retval = mem_ap_read_atomic_u32(swjdp,
1900 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1902 LOG_DEBUG("Examine %s failed", "CTYPR");
1903 return retval;
1906 if ((retval = mem_ap_read_atomic_u32(swjdp,
1907 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1909 LOG_DEBUG("Examine %s failed", "TTYPR");
1910 return retval;
1913 if ((retval = mem_ap_read_atomic_u32(swjdp,
1914 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1916 LOG_DEBUG("Examine %s failed", "DIDR");
1917 return retval;
1920 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1921 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1922 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1923 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1925 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1926 retval = cortex_a9_dpm_setup(cortex_a9, didr);
1927 if (retval != ERROR_OK)
1928 return retval;
1930 /* Setup Breakpoint Register Pairs */
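/* DIDR[27:24] encodes the number of BRPs minus one, and DIDR[23:20] the
* number of context-matching BRPs minus one. */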
1931 cortex_a9->brp_num = ((didr >> 24) & 0x0F) + 1;
1932 cortex_a9->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1933 cortex_a9->brp_num_available = cortex_a9->brp_num;
1934 cortex_a9->brp_list = calloc(cortex_a9->brp_num, sizeof(struct cortex_a9_brp));
1935 // cortex_a9->brb_enabled = ????;
1936 for (i = 0; i < cortex_a9->brp_num; i++)
1938 cortex_a9->brp_list[i].used = 0;
1939 if (i < (cortex_a9->brp_num-cortex_a9->brp_num_context))
1940 cortex_a9->brp_list[i].type = BRP_NORMAL;
1941 else
1942 cortex_a9->brp_list[i].type = BRP_CONTEXT;
1943 cortex_a9->brp_list[i].value = 0;
1944 cortex_a9->brp_list[i].control = 0;
1945 cortex_a9->brp_list[i].BRPn = i;
1948 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9->brp_num);
1950 target_set_examined(target);
1951 return ERROR_OK;
1954 static int cortex_a9_examine(struct target *target)
1956 int retval = ERROR_OK;
1958 /* don't re-probe hardware after each reset */
1959 if (!target_was_examined(target))
1960 retval = cortex_a9_examine_first(target);
1962 /* Configure core debug access */
1963 if (retval == ERROR_OK)
1964 retval = cortex_a9_init_debug_access(target);
1966 return retval;
1970 * Cortex-A9 target creation and initialization
1973 static int cortex_a9_init_target(struct command_context *cmd_ctx,
1974 struct target *target)
1976 /* examine_first() does a bunch of this */
1977 return ERROR_OK;
1980 static int cortex_a9_init_arch_info(struct target *target,
1981 struct cortex_a9_common *cortex_a9, struct jtag_tap *tap)
1983 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1984 struct arm *armv4_5 = &armv7a->armv4_5_common;
1985 struct adiv5_dap *dap = &armv7a->dap;
1987 armv7a->armv4_5_common.dap = dap;
1989 /* Setup struct cortex_a9_common */
1990 cortex_a9->common_magic = CORTEX_A9_COMMON_MAGIC;
1991 armv4_5->arch_info = armv7a;
1993 /* prepare JTAG information for the new target */
1994 cortex_a9->jtag_info.tap = tap;
1995 cortex_a9->jtag_info.scann_size = 4;
1997 /* Leave (only) generic DAP stuff for debugport_init() */
1998 dap->jtag_info = &cortex_a9->jtag_info;
1999 dap->memaccess_tck = 80;
2001 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2002 dap->tar_autoincr_block = (1 << 10);
2004 cortex_a9->fast_reg_read = 0;
2006 /* Set default value */
2007 cortex_a9->current_address_mode = ARM_MODE_ANY;
2009 /* register arch-specific functions */
2010 armv7a->examine_debug_reason = NULL;
2012 armv7a->post_debug_entry = cortex_a9_post_debug_entry;
2014 armv7a->pre_restore_context = NULL;
2015 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2016 armv7a->armv4_5_mmu.get_ttb = cortex_a9_get_ttb;
2017 armv7a->armv4_5_mmu.read_memory = cortex_a9_read_phys_memory;
2018 armv7a->armv4_5_mmu.write_memory = cortex_a9_write_phys_memory;
2019 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a9_disable_mmu_caches;
2020 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a9_enable_mmu_caches;
2021 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2022 armv7a->armv4_5_mmu.mmu_enabled = 0;
2025 // arm7_9->handle_target_request = cortex_a9_handle_target_request;
2027 /* REVISIT v7a setup should be in a v7a-specific routine */
2028 arm_init_arch_info(target, armv4_5);
2029 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2031 target_register_timer_callback(cortex_a9_handle_target_request, 1, 1, target);
2033 return ERROR_OK;
2036 static int cortex_a9_target_create(struct target *target, Jim_Interp *interp)
2038 struct cortex_a9_common *cortex_a9 = calloc(1, sizeof(struct cortex_a9_common));
2040 return cortex_a9_init_arch_info(target, cortex_a9, target->tap);
2043 static int cortex_a9_get_ttb(struct target *target, uint32_t *result)
2045 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2046 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2047 uint32_t ttb = 0;
int retval = ERROR_OK;
2049 /* current_address_mode is set inside cortex_a9_virt2phys()
2050 where we can determine if address belongs to user or kernel */
2051 if(cortex_a9->current_address_mode == ARM_MODE_SVC)
2053 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
2054 retval = armv7a->armv4_5_common.mrc(target, 15,
2055 0, 1, /* op1, op2 */
2056 2, 0, /* CRn, CRm */
2057 &ttb);
2058 if (retval != ERROR_OK)
2059 return retval;
2061 else if(cortex_a9->current_address_mode == ARM_MODE_USR)
2063 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
2064 retval = armv7a->armv4_5_common.mrc(target, 15,
2065 0, 0, /* op1, op2 */
2066 2, 0, /* CRn, CRm */
2067 &ttb);
2068 if (retval != ERROR_OK)
2069 return retval;
2071 /* We don't know whether the address belongs to user or kernel space;
2072 assume that if we are in kernel mode the address belongs to the
2073 kernel, and if in user mode it belongs to user space. */
2075 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2077 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
2078 retval = armv7a->armv4_5_common.mrc(target, 15,
2079 0, 1, /* op1, op2 */
2080 2, 0, /* CRn, CRm */
2081 &ttb);
2082 if (retval != ERROR_OK)
2083 return retval;
2085 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2087 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
2088 retval = armv7a->armv4_5_common.mrc(target, 15,
2089 0, 0, /* op1, op2 */
2090 2, 0, /* CRn, CRm */
2091 &ttb);
2092 if (retval != ERROR_OK)
2093 return retval;
2095 /* finally, we don't know which TTB to use: user or kernel */
2096 else
2097 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2099 ttb &= 0xffffc000;
2101 *result = ttb;
2103 return ERROR_OK;
2106 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
2107 int d_u_cache, int i_cache)
2109 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2110 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2111 uint32_t cp15_control;
2112 int retval;
2114 /* read cp15 control register */
2115 retval = armv7a->armv4_5_common.mrc(target, 15,
2116 0, 0, /* op1, op2 */
2117 1, 0, /* CRn, CRm */
2118 &cp15_control);
2119 if (retval != ERROR_OK)
2120 return retval;
2123 if (mmu)
2124 cp15_control &= ~0x1U;
2126 if (d_u_cache)
2127 cp15_control &= ~0x4U;
2129 if (i_cache)
2130 cp15_control &= ~0x1000U;
2132 retval = armv7a->armv4_5_common.mcr(target, 15,
2133 0, 0, /* op1, op2 */
2134 1, 0, /* CRn, CRm */
2135 cp15_control);
2136 return retval;
2139 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
2140 int d_u_cache, int i_cache)
2142 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2143 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2144 uint32_t cp15_control;
2145 int retval;
2147 /* read cp15 control register */
2148 retval = armv7a->armv4_5_common.mrc(target, 15,
2149 0, 0, /* op1, op2 */
2150 1, 0, /* CRn, CRm */
2151 &cp15_control);
2152 if (retval != ERROR_OK)
2153 return retval;
2155 if (mmu)
2156 cp15_control |= 0x1U;
2158 if (d_u_cache)
2159 cp15_control |= 0x4U;
2161 if (i_cache)
2162 cp15_control |= 0x1000U;
2164 retval = armv7a->armv4_5_common.mcr(target, 15,
2165 0, 0, /* op1, op2 */
2166 1, 0, /* CRn, CRm */
2167 cp15_control);
2168 return retval;
2172 static int cortex_a9_mmu(struct target *target, int *enabled)
2174 if (target->state != TARGET_HALTED) {
2175 LOG_ERROR("%s: target not halted", __func__);
2176 return ERROR_TARGET_INVALID;
2179 *enabled = target_to_cortex_a9(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2180 return ERROR_OK;
2183 static int cortex_a9_virt2phys(struct target *target,
2184 uint32_t virt, uint32_t *phys)
2186 uint32_t cb;
2187 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2188 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2189 struct armv7a_common *armv7a = target_to_armv7a(target);
2191 /* We assume that the virtual address space is split between user
2192 and kernel in the Linux style:
2193 0x00000000-0xbfffffff - User space
2194 0xc0000000-0xffffffff - Kernel space */
2195 if( virt < 0xc0000000 ) /* Linux user space */
2196 cortex_a9->current_address_mode = ARM_MODE_USR;
2197 else /* Linux kernel */
2198 cortex_a9->current_address_mode = ARM_MODE_SVC;
2199 uint32_t ret;
2200 int retval = armv4_5_mmu_translate_va(target,
2201 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2202 if (retval != ERROR_OK)
2203 return retval;
2204 /* Reset the flag. We don't want someone else to use it by error */
2205 cortex_a9->current_address_mode = ARM_MODE_ANY;
2207 *phys = ret;
2208 return ERROR_OK;
2211 COMMAND_HANDLER(cortex_a9_handle_cache_info_command)
2213 struct target *target = get_current_target(CMD_CTX);
2214 struct armv7a_common *armv7a = target_to_armv7a(target);
2216 return armv4_5_handle_cache_info_command(CMD_CTX,
2217 &armv7a->armv4_5_mmu.armv4_5_cache);
2221 COMMAND_HANDLER(cortex_a9_handle_dbginit_command)
2223 struct target *target = get_current_target(CMD_CTX);
2224 if (!target_was_examined(target))
2226 LOG_ERROR("target not examined yet");
2227 return ERROR_FAIL;
2230 return cortex_a9_init_debug_access(target);
2233 static const struct command_registration cortex_a9_exec_command_handlers[] = {
2235 .name = "cache_info",
2236 .handler = cortex_a9_handle_cache_info_command,
2237 .mode = COMMAND_EXEC,
2238 .help = "display information about target caches",
2241 .name = "dbginit",
2242 .handler = cortex_a9_handle_dbginit_command,
2243 .mode = COMMAND_EXEC,
2244 .help = "Initialize core debug",
2246 COMMAND_REGISTRATION_DONE
2248 static const struct command_registration cortex_a9_command_handlers[] = {
2250 .chain = arm_command_handlers,
2253 .chain = armv7a_command_handlers,
2256 .name = "cortex_a9",
2257 .mode = COMMAND_ANY,
2258 .help = "Cortex-A9 command group",
2259 .chain = cortex_a9_exec_command_handlers,
2261 COMMAND_REGISTRATION_DONE
2264 struct target_type cortexa9_target = {
2265 .name = "cortex_a9",
2267 .poll = cortex_a9_poll,
2268 .arch_state = armv7a_arch_state,
2270 .target_request_data = NULL,
2272 .halt = cortex_a9_halt,
2273 .resume = cortex_a9_resume,
2274 .step = cortex_a9_step,
2276 .assert_reset = cortex_a9_assert_reset,
2277 .deassert_reset = cortex_a9_deassert_reset,
2278 .soft_reset_halt = NULL,
2280 /* REVISIT allow exporting VFP3 registers ... */
2281 .get_gdb_reg_list = arm_get_gdb_reg_list,
2283 .read_memory = cortex_a9_read_memory,
2284 .write_memory = cortex_a9_write_memory,
2285 .bulk_write_memory = cortex_a9_bulk_write_memory,
2287 .checksum_memory = arm_checksum_memory,
2288 .blank_check_memory = arm_blank_check_memory,
2290 .run_algorithm = armv4_5_run_algorithm,
2292 .add_breakpoint = cortex_a9_add_breakpoint,
2293 .remove_breakpoint = cortex_a9_remove_breakpoint,
2294 .add_watchpoint = NULL,
2295 .remove_watchpoint = NULL,
2297 .commands = cortex_a9_command_handlers,
2298 .target_create = cortex_a9_target_create,
2299 .init_target = cortex_a9_init_target,
2300 .examine = cortex_a9_examine,
2302 .read_phys_memory = cortex_a9_read_phys_memory,
2303 .write_phys_memory = cortex_a9_write_phys_memory,
2304 .mmu = cortex_a9_mmu,
2305 .virt2phys = cortex_a9_virt2phys,