/***************************************************************************
 * Copyright (C) 2005 by Dominic Rath
 * Dominic.Rath@gmx.de
 *
 * Copyright (C) 2006 by Magnus Lundin
 *
 * Copyright (C) 2008 by Spencer Oliver
 * spen@spen-soft.co.uk
 *
 * Copyright (C) 2009 by Dirk Behme
 * dirk.behme@gmail.com - copy from cortex_m3
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Cortex-A8(tm) TRM, ARM DDI 0344H
 ***************************************************************************/
#include "breakpoints.h"
#include "cortex_a8.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_opcodes.h"

static int cortex_a8_poll(struct target *target);
static int cortex_a8_debug_entry(struct target *target);
static int cortex_a8_restore_context(struct target *target, bool bpwp);
static int cortex_a8_set_breakpoint(struct target *target,
        struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a8_unset_breakpoint(struct target *target,
        struct breakpoint *breakpoint);
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
        uint32_t *value, int regnum);
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
        uint32_t value, int regnum);
static int cortex_a8_mmu(struct target *target, int *enabled);
static int cortex_a8_virt2phys(struct target *target,
        uint32_t virt, uint32_t *phys);
static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
        int d_u_cache, int i_cache);
static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
        int d_u_cache, int i_cache);
static uint32_t cortex_a8_get_ttb(struct target *target);

/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.  Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
#define swjdp_memoryap 0
#define swjdp_debugap 1
#define OMAP3530_DEBUG_BASE 0x54011000
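
/* Illustrative sketch (added for clarity, not part of the original file):
 * the pattern used throughout this driver when touching the debug AP is to
 * save the current AP selection, switch to the wanted AP, do the accesses,
 * and restore the old selection afterwards.  "swjdp" stands for any valid
 * struct adiv5_dap pointer.
 *
 *     uint8_t saved_apsel = dap_ap_get_select(swjdp);
 *     dap_ap_select(swjdp, swjdp_debugap);
 *     ... memory-mapped debug register accesses ...
 *     dap_ap_select(swjdp, saved_apsel);
 */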
/*
 * Cortex-A8 Basic debug access, very low level assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    /* Unlocking the debug registers for modification */
    /* The debugport might be uninitialised so try twice */
    retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
    if (retval != ERROR_OK)
        mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
    /* Clear Sticky Power Down status Bit in PRSR to enable access to
       the registers in the Core Power Domain */
    retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
    /* Enabling of instruction execution in debug mode is done in debug_entry code */

    /* Resync breakpoint registers */

    /* Since this is likely called from init or reset, update target state information */
    cortex_a8_poll(target);
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
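/* Illustrative usage sketch (added, not part of the original file): a caller
 * that already tracks DSCR can chain operations without extra DSCR reads.
 * 0xE1A00000 ("MOV r0, r0") is only a placeholder opcode for the example.
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;
 *     retval = cortex_a8_exec_opcode(target, 0xE1A00000, &dscr);
 *     if (retval == ERROR_OK)
 *         retval = cortex_a8_exec_opcode(target, 0xE1A00000, &dscr);
 */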
static int cortex_a8_exec_opcode(struct target *target,
        uint32_t opcode, uint32_t *dscr_p)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    dscr = dscr_p ? *dscr_p : 0;

    LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

    /* Wait for InstrCompl bit to be set */
    while ((dscr & DSCR_INSTR_COMP) == 0)
        retval = mem_ap_read_atomic_u32(swjdp,
                armv7a->debug_base + CPUDBG_DSCR, &dscr);
        if (retval != ERROR_OK)
            LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);

    mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);

    retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        LOG_ERROR("Could not read DSCR register");
    while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
    int retval = ERROR_OK;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
    cortex_a8_dap_write_coreregister_u32(target, address, 0);
    cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
    dap_ap_select(swjdp, swjdp_memoryap);
    mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4 * 15, address);
    dap_ap_select(swjdp, swjdp_debugap);
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
        uint32_t *value, int regnum)
    int retval = ERROR_OK;
    uint8_t reg = regnum & 0xFF;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
    cortex_a8_exec_opcode(target,
            ARMV4_5_MCR(14, 0, reg, 0, 5, 0),

    /* "MOV r0, r15"; then move r0 to DCCTX */
    cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
    cortex_a8_exec_opcode(target,
            ARMV4_5_MCR(14, 0, 0, 0, 5, 0),

    /* "MRS r0, CPSR" or "MRS r0, SPSR"
     * then move r0 to DCCTX
     */
    cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
    cortex_a8_exec_opcode(target,
            ARMV4_5_MCR(14, 0, 0, 0, 5, 0),

    /* Wait for DTRRXfull then read DTRRTX */
    while ((dscr & DSCR_DTR_TX_FULL) == 0)
        retval = mem_ap_read_atomic_u32(swjdp,
                armv7a->debug_base + CPUDBG_DSCR, &dscr);

    retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DTRTX, value);
    LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
        uint32_t value, int regnum)
    int retval = ERROR_OK;
    uint8_t Rd = regnum & 0xFF;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

    /* Check that DCCRX is not full */
    retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);
    if (dscr & DSCR_DTR_RX_FULL)
        LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
        /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
        cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),

    /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
    LOG_DEBUG("write DCC 0x%08" PRIx32, value);
    retval = mem_ap_write_u32(swjdp,
            armv7a->debug_base + CPUDBG_DTRRX, value);

    /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
    cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),

    /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
    cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
    cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);

    /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
     * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
     */
    cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
    cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),

    /* "Prefetch flush" after modifying execution status in CPSR */
    cortex_a8_exec_opcode(target,
            ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
/* Write to memory mapped registers directly with no cache or mmu handling */
static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    retval = mem_ap_write_atomic_u32(swjdp, address, value);
/*
 * Cortex-A8 implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */
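
/* Illustrative calling sequence (added for clarity, not from the original
 * file): the generic arm_dpm layer, and cortex_a8_write_phys_memory() below,
 * bracket instruction execution with prepare()/finish() and rely on the
 * DSCR_INSTR_COMP invariant in between.
 *
 *     retval = dpm->prepare(dpm);
 *     if (retval == ERROR_OK)
 *         retval = dpm->instr_write_data_r0(dpm,
 *                 ARMV4_5_MCR(15, 0, 0, 7, 5, 1), cacheline);
 *     dpm->finish(dpm);
 */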
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
    return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);

static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
    LOG_DEBUG("write DCC 0x%08" PRIx32, data);
    return mem_ap_write_u32(&a8->armv7a_common.dap,
            a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);

static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
    struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
    uint32_t dscr = DSCR_INSTR_COMP;

    /* Wait for DTRRXfull */
    while ((dscr & DSCR_DTR_TX_FULL) == 0) {
        retval = mem_ap_read_atomic_u32(swjdp,
                a8->armv7a_common.debug_base + CPUDBG_DSCR,

    retval = mem_ap_read_atomic_u32(swjdp,
            a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
    //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    struct adiv5_dap *swjdp = &a8->armv7a_common.dap;

    /* set up invariant: INSTR_COMP is set after every DPM operation */
        retval = mem_ap_read_atomic_u32(swjdp,
                a8->armv7a_common.debug_base + CPUDBG_DSCR,
    } while ((dscr & DSCR_INSTR_COMP) == 0);

    /* this "should never happen" ... */
    if (dscr & DSCR_DTR_RX_FULL) {
        LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
        retval = cortex_a8_exec_opcode(
                a8->armv7a_common.armv4_5_common.target,
                ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
    /* REVISIT what could be done here? */

static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
        uint32_t opcode, uint32_t data)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_INSTR_COMP;

    retval = cortex_a8_write_dcc(a8, data);

    return cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,

static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
        uint32_t opcode, uint32_t data)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_INSTR_COMP;

    retval = cortex_a8_write_dcc(a8, data);

    /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
    retval = cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,
            ARMV4_5_MRC(14, 0, 0, 0, 5, 0),

    /* then the opcode, taking data from R0 */
    retval = cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,

static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
    struct target *target = dpm->arm->target;
    uint32_t dscr = DSCR_INSTR_COMP;

    /* "Prefetch flush" after modifying execution status in CPSR */
    return cortex_a8_exec_opcode(target,
            ARMV4_5_MCR(15, 0, 0, 7, 5, 4),

static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
        uint32_t opcode, uint32_t *data)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_INSTR_COMP;

    /* the opcode, writing data to DCC */
    retval = cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,

    return cortex_a8_read_dcc(a8, data, &dscr);

static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
        uint32_t opcode, uint32_t *data)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_INSTR_COMP;

    /* the opcode, writing data to R0 */
    retval = cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,

    /* write R0 to DCC */
    retval = cortex_a8_exec_opcode(
            a8->armv7a_common.armv4_5_common.target,
            ARMV4_5_MCR(14, 0, 0, 0, 5, 0),

    return cortex_a8_read_dcc(a8, data, &dscr);
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index,
        uint32_t addr, uint32_t control)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);
    uint32_t vr = a8->armv7a_common.debug_base;
    uint32_t cr = a8->armv7a_common.debug_base;

    case 0 ... 15:      /* breakpoints */
        vr += CPUDBG_BVR_BASE;
        cr += CPUDBG_BCR_BASE;
    case 16 ... 31:     /* watchpoints */
        vr += CPUDBG_WVR_BASE;
        cr += CPUDBG_WCR_BASE;

    LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
            (unsigned) vr, (unsigned) cr);

    retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
    if (retval != ERROR_OK)
    retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,

static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index)
    struct cortex_a8_common *a8 = dpm_to_a8(dpm);

        cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
        cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;

    LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

    /* clear control register */
    return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);

static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
    struct arm_dpm *dpm = &a8->armv7a_common.dpm;

    dpm->arm = &a8->armv7a_common.armv4_5_common;

    dpm->prepare = cortex_a8_dpm_prepare;
    dpm->finish = cortex_a8_dpm_finish;

    dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
    dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
    dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

    dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
    dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

    dpm->bpwp_enable = cortex_a8_bpwp_enable;
    dpm->bpwp_disable = cortex_a8_bpwp_disable;

    retval = arm_dpm_setup(dpm);
    if (retval == ERROR_OK)
        retval = arm_dpm_initialize(dpm);
/*
 * Cortex-A8 Run control
 */

static int cortex_a8_poll(struct target *target)
    int retval = ERROR_OK;
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct adiv5_dap *swjdp = &armv7a->dap;
    enum target_state prev_target_state = target->state;
    uint8_t saved_apsel = dap_ap_get_select(swjdp);

    dap_ap_select(swjdp, swjdp_debugap);
    retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        dap_ap_select(swjdp, saved_apsel);

    cortex_a8->cpudbg_dscr = dscr;

    if ((dscr & 0x3) == 0x3)
        if (prev_target_state != TARGET_HALTED)
            /* We have a halting debug event */
            LOG_DEBUG("Target halted");
            target->state = TARGET_HALTED;
            if ((prev_target_state == TARGET_RUNNING)
                    || (prev_target_state == TARGET_RESET))
                retval = cortex_a8_debug_entry(target);
                if (retval != ERROR_OK)
                target_call_event_callbacks(target,
                        TARGET_EVENT_HALTED);
            if (prev_target_state == TARGET_DEBUG_RUNNING)
                retval = cortex_a8_debug_entry(target);
                if (retval != ERROR_OK)
                target_call_event_callbacks(target,
                        TARGET_EVENT_DEBUG_HALTED);
    else if ((dscr & 0x3) == 0x2)
        target->state = TARGET_RUNNING;
        LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
        target->state = TARGET_UNKNOWN;

    dap_ap_select(swjdp, saved_apsel);
static int cortex_a8_halt(struct target *target)
    int retval = ERROR_OK;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;
    uint8_t saved_apsel = dap_ap_get_select(swjdp);
    dap_ap_select(swjdp, swjdp_debugap);

    /*
     * Tell the core to be halted by writing DRCR with 0x1
     * and then wait for the core to be halted.
     */
    retval = mem_ap_write_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DRCR, 0x1);

    /*
     * enter halting debug mode
     */
    mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
    retval = mem_ap_write_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);

    if (retval != ERROR_OK)

        mem_ap_read_atomic_u32(swjdp,
                armv7a->debug_base + CPUDBG_DSCR, &dscr);
    } while ((dscr & DSCR_CORE_HALTED) == 0);

    target->debug_reason = DBG_REASON_DBGRQ;

    dap_ap_select(swjdp, saved_apsel);
static int cortex_a8_resume(struct target *target, int current,
        uint32_t address, int handle_breakpoints, int debug_execution)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    struct adiv5_dap *swjdp = &armv7a->dap;
    //  struct breakpoint *breakpoint = NULL;
    uint32_t resume_pc, dscr;

    uint8_t saved_apsel = dap_ap_get_select(swjdp);
    dap_ap_select(swjdp, swjdp_debugap);

    if (!debug_execution)
        target_free_all_working_areas(target);

    /* Disable interrupts */
    /* We disable interrupts in the PRIMASK register instead of
     * masking with C_MASKINTS,
     * This is probably the same issue as Cortex-M3 Errata 377493:
     * C_MASKINTS in parallel with disabled interrupts can cause
     * local faults to not be taken. */
    buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
    armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
    armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

    /* Make sure we are in Thumb mode */
    buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
            buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
    armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
    armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;

    /* current = 1: continue on current pc, otherwise continue at <address> */
    resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

    /* Make sure that the Armv7 gdb thumb fixups do not
     * kill the return address
     */
    switch (armv4_5->core_state)
        resume_pc &= 0xFFFFFFFC;
    case ARM_STATE_THUMB:
    case ARM_STATE_THUMB_EE:
        /* When the return address is loaded into PC
         * bit 0 must be 1 to stay in Thumb state
         */
    case ARM_STATE_JAZELLE:
        LOG_ERROR("How do I resume into Jazelle state??");

    LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
    buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
    armv4_5->pc->dirty = 1;
    armv4_5->pc->valid = 1;

    cortex_a8_restore_context(target, handle_breakpoints);

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints)
        /* Single step past breakpoint at current address */
        if ((breakpoint = breakpoint_find(target, resume_pc)))
            LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
            cortex_m3_unset_breakpoint(target, breakpoint);
            cortex_m3_single_step_core(target);
            cortex_m3_set_breakpoint(target, breakpoint);

    /* Restart core and wait for it to be started
     * NOTE: this clears DSCR_ITR_EN and other bits.
     *
     * REVISIT: for single stepping, we probably want to
     * disable IRQs by default, with optional override...
     */
    mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);

        mem_ap_read_atomic_u32(swjdp,
                armv7a->debug_base + CPUDBG_DSCR, &dscr);
    } while ((dscr & DSCR_CORE_RESTARTED) == 0);

    target->debug_reason = DBG_REASON_NOTHALTED;
    target->state = TARGET_RUNNING;

    /* registers are now invalid */
    register_cache_invalidate(armv4_5->core_cache);

    if (!debug_execution)
        target->state = TARGET_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
        LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
        target->state = TARGET_DEBUG_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
        LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);

    dap_ap_select(swjdp, saved_apsel);
static int cortex_a8_debug_entry(struct target *target)
    uint32_t regfile[16], cpsr, dscr;
    int retval = ERROR_OK;
    struct working_area *regfile_working_area = NULL;
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    struct adiv5_dap *swjdp = &armv7a->dap;

    LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

    /* REVISIT surely we should not re-read DSCR !! */
    mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);

    /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
     * imprecise data aborts get discarded by issuing a Data
     * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
     */

    /* Enable the ITR execution once we are in debug mode */
    retval = mem_ap_write_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DSCR, dscr);

    /* Examine debug reason */
    arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

    /* save address of instruction that triggered the watchpoint? */
    if (target->debug_reason == DBG_REASON_WATCHPOINT) {
        retval = mem_ap_read_atomic_u32(swjdp,
                armv7a->debug_base + CPUDBG_WFAR,
        arm_dpm_report_wfar(&armv7a->dpm, wfar);

    /* REVISIT fast_reg_read is never set ... */

    /* Examine target state and mode */
    if (cortex_a8->fast_reg_read)
        target_alloc_working_area(target, 64, &regfile_working_area);

    /* First load registers accessible through the core debug port */
    if (!regfile_working_area)
        retval = arm_dpm_read_current_registers(&armv7a->dpm);

        dap_ap_select(swjdp, swjdp_memoryap);
        cortex_a8_read_regs_through_mem(target,
                regfile_working_area->address, regfile);
        dap_ap_select(swjdp, swjdp_memoryap);
        target_free_working_area(target, regfile_working_area);

        /* read Current PSR */
        cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
        dap_ap_select(swjdp, swjdp_debugap);
        LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

        arm_set_cpsr(armv4_5, cpsr);

        for (i = 0; i <= ARM_PC; i++)
            reg = arm_reg_current(armv4_5, i);
            buf_set_u32(reg->value, 0, 32, regfile[i]);

        /* Fixup PC Resume Address */
            // T bit set for Thumb or ThumbEE state
            regfile[ARM_PC] -= 4;
            regfile[ARM_PC] -= 8;
        buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
        reg->dirty = reg->valid;

    /* TODO, Move this */
    uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
    cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
    LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

    cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
    LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

    cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
    LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);

    /* Are we in an exception handler */
    // armv4_5->exception_number = 0;
    if (armv7a->post_debug_entry)
        armv7a->post_debug_entry(target);
static void cortex_a8_post_debug_entry(struct target *target)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;

    /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
    retval = armv7a->armv4_5_common.mrc(target, 15,
            &cortex_a8->cp15_control_reg);
    LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

    if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
        uint32_t cache_type_reg;

        /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
        retval = armv7a->armv4_5_common.mrc(target, 15,
        LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

        /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
        armv4_5_identify_cache(cache_type_reg,
                &armv7a->armv4_5_mmu.armv4_5_cache);

    armv7a->armv4_5_mmu.mmu_enabled =
            (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
    armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
            (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
    armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
            (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
static int cortex_a8_step(struct target *target, int current, uint32_t address,
        int handle_breakpoints)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    struct breakpoint *breakpoint = NULL;
    struct breakpoint stepbreakpoint;

    if (target->state != TARGET_HALTED)
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;

    /* current = 1: continue on current pc, otherwise continue at <address> */
        buf_set_u32(r->value, 0, 32, address);
        address = buf_get_u32(r->value, 0, 32);

    /* The front-end may request us not to handle breakpoints.
     * But since Cortex-A8 uses a breakpoint for single stepping,
     * we MUST handle breakpoints.
     */
    handle_breakpoints = 1;
    if (handle_breakpoints) {
        breakpoint = breakpoint_find(target, address);
            cortex_a8_unset_breakpoint(target, breakpoint);

    /* Setup single step breakpoint */
    stepbreakpoint.address = address;
    stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
    stepbreakpoint.type = BKPT_HARD;
    stepbreakpoint.set = 0;

    /* Break on IVA mismatch */
    cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);

    target->debug_reason = DBG_REASON_SINGLESTEP;

    cortex_a8_resume(target, 1, address, 0, 0);

    while (target->state != TARGET_HALTED)
        cortex_a8_poll(target);
            LOG_WARNING("timeout waiting for target halt");

    cortex_a8_unset_breakpoint(target, &stepbreakpoint);

    target->debug_reason = DBG_REASON_BREAKPOINT;

        cortex_a8_set_breakpoint(target, breakpoint, 0);

    if (target->state != TARGET_HALTED)
        LOG_DEBUG("target stepped");
static int cortex_a8_restore_context(struct target *target, bool bpwp)
    struct armv7a_common *armv7a = target_to_armv7a(target);

    if (armv7a->pre_restore_context)
        armv7a->pre_restore_context(target);

    arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
/*
 * Cortex-A8 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int cortex_a8_set_breakpoint(struct target *target,
        struct breakpoint *breakpoint, uint8_t matchmode)
    uint8_t byte_addr_select = 0x0F;
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

    if (breakpoint->set)
        LOG_WARNING("breakpoint already set");

    if (breakpoint->type == BKPT_HARD)
        while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
        if (brp_i >= cortex_a8->brp_num)
            LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
        breakpoint->set = brp_i + 1;
        if (breakpoint->length == 2)
            byte_addr_select = (3 << (breakpoint->address & 0x02));
        control = ((matchmode & 0x7) << 20)
                | (byte_addr_select << 5)
        brp_list[brp_i].used = 1;
        brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
        brp_list[brp_i].control = control;
        cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
                + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
                brp_list[brp_i].value);
        cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
                + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
                brp_list[brp_i].control);
        LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
                brp_list[brp_i].control,
                brp_list[brp_i].value);
    else if (breakpoint->type == BKPT_SOFT)
        if (breakpoint->length == 2)
            buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
            buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
        retval = target->type->read_memory(target,
                breakpoint->address & 0xFFFFFFFE,
                breakpoint->length, 1,
                breakpoint->orig_instr);
        if (retval != ERROR_OK)
        retval = target->type->write_memory(target,
                breakpoint->address & 0xFFFFFFFE,
                breakpoint->length, 1, code);
        if (retval != ERROR_OK)
        breakpoint->set = 0x11; /* Any nice value but 0 */
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

    if (!breakpoint->set)
        LOG_WARNING("breakpoint not set");

    if (breakpoint->type == BKPT_HARD)
        int brp_i = breakpoint->set - 1;
        if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
            LOG_DEBUG("Invalid BRP number in breakpoint");
        LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
                brp_list[brp_i].control, brp_list[brp_i].value);
        brp_list[brp_i].used = 0;
        brp_list[brp_i].value = 0;
        brp_list[brp_i].control = 0;
        cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
                + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
                brp_list[brp_i].control);
        cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
                + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
                brp_list[brp_i].value);

        /* restore original instruction (kept in target endianness) */
        if (breakpoint->length == 4)
            retval = target->type->write_memory(target,
                    breakpoint->address & 0xFFFFFFFE,
                    4, 1, breakpoint->orig_instr);
            if (retval != ERROR_OK)
            retval = target->type->write_memory(target,
                    breakpoint->address & 0xFFFFFFFE,
                    2, 1, breakpoint->orig_instr);
            if (retval != ERROR_OK)
    breakpoint->set = 0;
static int cortex_a8_add_breakpoint(struct target *target,
        struct breakpoint *breakpoint)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);

    if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
        LOG_INFO("no hardware breakpoint available");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    if (breakpoint->type == BKPT_HARD)
        cortex_a8->brp_num_available--;
    cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);

    /* It is perfectly possible to remove breakpoints while the target is running */
    if (target->state != TARGET_HALTED)
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;

    if (breakpoint->set)
        cortex_a8_unset_breakpoint(target, breakpoint);
        if (breakpoint->type == BKPT_HARD)
            cortex_a8->brp_num_available++;
/*
 * Cortex-A8 Reset functions
 */

static int cortex_a8_assert_reset(struct target *target)
    struct armv7a_common *armv7a = target_to_armv7a(target);

    /* FIXME when halt is requested, make it work somehow... */

    /* Issue some kind of warm reset. */
    if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
        target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
    } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
        /* REVISIT handle "pulls" cases, if there's
         * hardware that needs them to work.
         */
        jtag_add_reset(0, 1);
        LOG_ERROR("%s: how to reset?", target_name(target));

    /* registers are now invalid */
    register_cache_invalidate(armv7a->armv4_5_common.core_cache);

    target->state = TARGET_RESET;
static int cortex_a8_deassert_reset(struct target *target)
    /* be certain SRST is off */
    jtag_add_reset(0, 0);

    retval = cortex_a8_poll(target);

    if (target->reset_halt) {
        if (target->state != TARGET_HALTED) {
            LOG_WARNING("%s: ran after reset and before halt ...",
                    target_name(target));
            if ((retval = target_halt(target)) != ERROR_OK)
/*
 * Cortex-A8 Memory access
 *
 * This is the same as for Cortex-M3, but we must also use the correct
 * AP number for every access.
 */
static int cortex_a8_read_phys_memory(struct target *target,
        uint32_t address, uint32_t size,
        uint32_t count, uint8_t *buffer)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;
    int retval = ERROR_INVALID_ARGUMENTS;

    /* cortex_a8 handles unaligned memory access */

    // ??? dap_ap_select(swjdp, swjdp_memoryap);
    LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
    if (count && buffer) {
            retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
            retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
            retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);

static int cortex_a8_read_memory(struct target *target, uint32_t address,
        uint32_t size, uint32_t count, uint8_t *buffer)
    uint32_t virt, phys;

    /* cortex_a8 handles unaligned memory access */

    // ??? dap_ap_select(swjdp, swjdp_memoryap);
    LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
    cortex_a8_mmu(target, &enabled);
        cortex_a8_virt2phys(target, virt, &phys);
        LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);

    return cortex_a8_read_phys_memory(target, address, size, count, buffer);
static int cortex_a8_write_phys_memory(struct target *target,
        uint32_t address, uint32_t size,
        uint32_t count, uint8_t *buffer)
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;
    int retval = ERROR_INVALID_ARGUMENTS;

    // ??? dap_ap_select(swjdp, swjdp_memoryap);

    LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
    if (count && buffer) {
            retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
            retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
            retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);

    /* REVISIT this op is generic ARMv7-A/R stuff */
    if (retval == ERROR_OK && target->state == TARGET_HALTED)
        struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

        retval = dpm->prepare(dpm);
        if (retval != ERROR_OK)

        /* The Cache handling will NOT work with MMU active, the
         * wrong addresses will be invalidated!
         *
         * For both ICache and DCache, walk all cache lines in the
         * address range. Cortex-A8 has fixed 64 byte line length.
         *
         * REVISIT per ARMv7, these may trigger watchpoints ...
         */

        /* invalidate I-Cache */
        if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
            /* ICIMVAU - Invalidate Cache single entry
             * MCR p15, 0, r0, c7, c5, 1
             */
            for (uint32_t cacheline = address;
                    cacheline < address + size * count;
                retval = dpm->instr_write_data_r0(dpm,
                        ARMV4_5_MCR(15, 0, 0, 7, 5, 1),

        /* invalidate D-Cache */
        if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
            /* DCIMVAC - Invalidate data Cache line
             * MCR p15, 0, r0, c7, c6, 1
             */
            for (uint32_t cacheline = address;
                    cacheline < address + size * count;
                retval = dpm->instr_write_data_r0(dpm,
                        ARMV4_5_MCR(15, 0, 0, 7, 6, 1),

        /* (void) */ dpm->finish(dpm);
static int cortex_a8_write_memory(struct target *target, uint32_t address,
        uint32_t size, uint32_t count, uint8_t *buffer)
    uint32_t virt, phys;

    // ??? dap_ap_select(swjdp, swjdp_memoryap);

    LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
    cortex_a8_mmu(target, &enabled);
        cortex_a8_virt2phys(target, virt, &phys);
        LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);

    return cortex_a8_write_phys_memory(target, address, size,

static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
        uint32_t count, uint8_t *buffer)
    return cortex_a8_write_memory(target, address, 4, count, buffer);
static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
    mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
    *ctrl = (uint8_t)dcrdr;
    *value = (uint8_t)(dcrdr >> 8);

    LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

    /* write ack back to software dcc register
     * to signify we have read the data */
    if (dcrdr & (1 << 0))
        mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
static int cortex_a8_handle_target_request(void *priv)
    struct target *target = priv;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = &armv7a->dap;

    if (!target_was_examined(target))
    if (!target->dbg_msg_enabled)

    if (target->state == TARGET_RUNNING)
        cortex_a8_dcc_read(swjdp, &data, &ctrl);

        /* check if we have data */
        if (ctrl & (1 << 0))
            /* we assume target is quick enough */
            cortex_a8_dcc_read(swjdp, &data, &ctrl);
            request |= (data << 8);
            cortex_a8_dcc_read(swjdp, &data, &ctrl);
            request |= (data << 16);
            cortex_a8_dcc_read(swjdp, &data, &ctrl);
            request |= (data << 24);
            target_request(target, request);
/*
 * Cortex-A8 target information and configuration
 */

static int cortex_a8_examine_first(struct target *target)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct adiv5_dap *swjdp = &armv7a->dap;
    int retval = ERROR_OK;
    uint32_t didr, ctypr, ttypr, cpuid;

    /* stop assuming this is an OMAP! */
    LOG_DEBUG("TODO - autoconfigure");

    /* Here we shall insert a proper ROM Table scan */
    armv7a->debug_base = OMAP3530_DEBUG_BASE;

    /* We do one extra read to ensure DAP is configured,
     * we call ahbap_debugport_init(swjdp) instead
     */
    ahbap_debugport_init(swjdp);
    mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
    if ((retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
        LOG_DEBUG("Examine %s failed", "CPUID");

    if ((retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
        LOG_DEBUG("Examine %s failed", "CTYPR");

    if ((retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
        LOG_DEBUG("Examine %s failed", "TTYPR");

    if ((retval = mem_ap_read_atomic_u32(swjdp,
            armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
        LOG_DEBUG("Examine %s failed", "DIDR");

    LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
    LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
    LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
    LOG_DEBUG("didr = 0x%08" PRIx32, didr);

    armv7a->armv4_5_common.core_type = ARM_MODE_MON;
    cortex_a8_dpm_setup(cortex_a8, didr);

    /* Setup Breakpoint Register Pairs */
    cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
    cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
    cortex_a8->brp_num_available = cortex_a8->brp_num;
    cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
    // cortex_a8->brb_enabled = ????;
    for (i = 0; i < cortex_a8->brp_num; i++)
        cortex_a8->brp_list[i].used = 0;
        if (i < (cortex_a8->brp_num - cortex_a8->brp_num_context))
            cortex_a8->brp_list[i].type = BRP_NORMAL;
            cortex_a8->brp_list[i].type = BRP_CONTEXT;
        cortex_a8->brp_list[i].value = 0;
        cortex_a8->brp_list[i].control = 0;
        cortex_a8->brp_list[i].BRPn = i;

    LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);

    target_set_examined(target);
static int cortex_a8_examine(struct target *target)
    int retval = ERROR_OK;

    /* don't re-probe hardware after each reset */
    if (!target_was_examined(target))
        retval = cortex_a8_examine_first(target);

    /* Configure core debug access */
    if (retval == ERROR_OK)
        retval = cortex_a8_init_debug_access(target);
/*
 * Cortex-A8 target creation and initialization
 */

static int cortex_a8_init_target(struct command_context *cmd_ctx,
        struct target *target)
    /* examine_first() does a bunch of this */
static int cortex_a8_init_arch_info(struct target *target,
        struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    struct adiv5_dap *dap = &armv7a->dap;

    armv7a->armv4_5_common.dap = dap;

    /* Setup struct cortex_a8_common */
    cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
    armv4_5->arch_info = armv7a;

    /* prepare JTAG information for the new target */
    cortex_a8->jtag_info.tap = tap;
    cortex_a8->jtag_info.scann_size = 4;

    /* Leave (only) generic DAP stuff for debugport_init() */
    dap->jtag_info = &cortex_a8->jtag_info;
    dap->memaccess_tck = 80;

    /* Number of bits for tar autoincrement, impl. dep. at least 10 */
    dap->tar_autoincr_block = (1 << 10);

    cortex_a8->fast_reg_read = 0;

    /* Set default value */
    cortex_a8->current_address_mode = ARM_MODE_ANY;

    /* register arch-specific functions */
    armv7a->examine_debug_reason = NULL;

    armv7a->post_debug_entry = cortex_a8_post_debug_entry;

    armv7a->pre_restore_context = NULL;
    armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
    armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
    armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
    armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
    armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
    armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
    armv7a->armv4_5_mmu.has_tiny_pages = 1;
    armv7a->armv4_5_mmu.mmu_enabled = 0;

    // arm7_9->handle_target_request = cortex_a8_handle_target_request;

    /* REVISIT v7a setup should be in a v7a-specific routine */
    arm_init_arch_info(target, armv4_5);
    armv7a->common_magic = ARMV7_COMMON_MAGIC;

    target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
    struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));

    cortex_a8_init_arch_info(target, cortex_a8, target->tap);
static uint32_t cortex_a8_get_ttb(struct target *target)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    uint32_t ttb = 0, retval = ERROR_OK;

    /* current_address_mode is set inside cortex_a8_virt2phys()
       where we can determine if the address belongs to user or kernel */
    if (cortex_a8->current_address_mode == ARM_MODE_SVC)
        /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
        retval = armv7a->armv4_5_common.mrc(target, 15,
                0, 1,   /* op1, op2 */
                2, 0,   /* CRn, CRm */
    else if (cortex_a8->current_address_mode == ARM_MODE_USR)
        /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
        retval = armv7a->armv4_5_common.mrc(target, 15,
                0, 0,   /* op1, op2 */
                2, 0,   /* CRn, CRm */
    /* we don't know whose address this is: user or kernel.
       We assume that if we are in kernel mode the address
       belongs to the kernel, else if in user mode it belongs to the user */
    else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
        /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
        retval = armv7a->armv4_5_common.mrc(target, 15,
                0, 1,   /* op1, op2 */
                2, 0,   /* CRn, CRm */
    else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
        /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
        retval = armv7a->armv4_5_common.mrc(target, 15,
                0, 0,   /* op1, op2 */
                2, 0,   /* CRn, CRm */
    /* finally we don't know whose ttb to use: user or kernel */
        LOG_ERROR("Don't know how to get ttb for current mode!!!");
static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
        int d_u_cache, int i_cache)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    uint32_t cp15_control;

    /* read cp15 control register */
    armv7a->armv4_5_common.mrc(target, 15,
            0, 0,   /* op1, op2 */
            1, 0,   /* CRn, CRm */

        cp15_control &= ~0x1U;
        cp15_control &= ~0x4U;
        cp15_control &= ~0x1000U;

    armv7a->armv4_5_common.mcr(target, 15,
            0, 0,   /* op1, op2 */
            1, 0,   /* CRn, CRm */

static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
        int d_u_cache, int i_cache)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    uint32_t cp15_control;

    /* read cp15 control register */
    armv7a->armv4_5_common.mrc(target, 15,
            0, 0,   /* op1, op2 */
            1, 0,   /* CRn, CRm */

        cp15_control |= 0x1U;
        cp15_control |= 0x4U;
        cp15_control |= 0x1000U;

    armv7a->armv4_5_common.mcr(target, 15,
            0, 0,   /* op1, op2 */
            1, 0,   /* CRn, CRm */
static int cortex_a8_mmu(struct target *target, int *enabled)
    if (target->state != TARGET_HALTED) {
        LOG_ERROR("%s: target not halted", __func__);
        return ERROR_TARGET_INVALID;

    *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
static int cortex_a8_virt2phys(struct target *target,
        uint32_t virt, uint32_t *phys)
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct armv7a_common *armv7a = target_to_armv7a(target);

    /* We assume that the virtual address space is split
       between user and kernel in the Linux style:
       0x00000000-0xbfffffff - User space
       0xc0000000-0xffffffff - Kernel space */
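    /* Worked example (added for clarity, not part of the original file): with
     * this split, a virtual address such as 0xbf012345 selects ARM_MODE_USR
     * (so cortex_a8_get_ttb() reads TTBR0), while 0xc0123456 selects
     * ARM_MODE_SVC (TTBR1). */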
    if (virt < 0xc0000000)  /* Linux user space */
        cortex_a8->current_address_mode = ARM_MODE_USR;
    else                    /* Linux kernel */
        cortex_a8->current_address_mode = ARM_MODE_SVC;
    uint32_t ret = armv4_5_mmu_translate_va(target,
            &armv7a->armv4_5_mmu, virt, &type, &cb, &domain, &ap);
    /* Reset the flag. We don't want someone else to use it by error */
    cortex_a8->current_address_mode = ARM_MODE_ANY;
COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
    struct target *target = get_current_target(CMD_CTX);
    struct armv7a_common *armv7a = target_to_armv7a(target);

    return armv4_5_handle_cache_info_command(CMD_CTX,
            &armv7a->armv4_5_mmu.armv4_5_cache);

COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
    struct target *target = get_current_target(CMD_CTX);

    cortex_a8_init_debug_access(target);
static const struct command_registration cortex_a8_exec_command_handlers[] = {
        .name = "cache_info",
        .handler = cortex_a8_handle_cache_info_command,
        .mode = COMMAND_EXEC,
        .help = "display information about target caches",

        .handler = cortex_a8_handle_dbginit_command,
        .mode = COMMAND_EXEC,
        .help = "Initialize core debug",

    COMMAND_REGISTRATION_DONE

static const struct command_registration cortex_a8_command_handlers[] = {
        .chain = arm_command_handlers,
        .chain = armv7a_command_handlers,
        .name = "cortex_a8",
        .mode = COMMAND_ANY,
        .help = "Cortex-A8 command group",
        .chain = cortex_a8_exec_command_handlers,
    COMMAND_REGISTRATION_DONE
struct target_type cortexa8_target = {
    .name = "cortex_a8",

    .poll = cortex_a8_poll,
    .arch_state = armv7a_arch_state,

    .target_request_data = NULL,

    .halt = cortex_a8_halt,
    .resume = cortex_a8_resume,
    .step = cortex_a8_step,

    .assert_reset = cortex_a8_assert_reset,
    .deassert_reset = cortex_a8_deassert_reset,
    .soft_reset_halt = NULL,

    /* REVISIT allow exporting VFP3 registers ... */
    .get_gdb_reg_list = arm_get_gdb_reg_list,

    .read_memory = cortex_a8_read_memory,
    .write_memory = cortex_a8_write_memory,
    .bulk_write_memory = cortex_a8_bulk_write_memory,

    .checksum_memory = arm_checksum_memory,
    .blank_check_memory = arm_blank_check_memory,

    .run_algorithm = armv4_5_run_algorithm,

    .add_breakpoint = cortex_a8_add_breakpoint,
    .remove_breakpoint = cortex_a8_remove_breakpoint,
    .add_watchpoint = NULL,
    .remove_watchpoint = NULL,

    .commands = cortex_a8_command_handlers,
    .target_create = cortex_a8_target_create,
    .init_target = cortex_a8_init_target,
    .examine = cortex_a8_examine,

    .read_phys_memory = cortex_a8_read_phys_memory,
    .write_phys_memory = cortex_a8_write_phys_memory,
    .mmu = cortex_a8_mmu,
    .virt2phys = cortex_a8_virt2phys,