cortex_a : smp support
[openocd/ntfreak.git] / src / target / cortex_a.c
blob: 9b8ba41665fb4a517ec9c8c9854f8d697f20ce81
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
43 #include "breakpoints.h"
44 #include "cortex_a.h"
45 #include "register.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_unset_breakpoint(struct target *target,
57 struct breakpoint *breakpoint);
58 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
59 uint32_t *value, int regnum);
60 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
61 uint32_t value, int regnum);
62 static int cortex_a8_mmu(struct target *target, int *enabled);
63 static int cortex_a8_virt2phys(struct target *target,
64 uint32_t virt, uint32_t *phys);
65 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
66 int d_u_cache, int i_cache);
67 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
68 int d_u_cache, int i_cache);
69 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
73 * FIXME do topology discovery using the ROM; don't
74 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
75 * cores, with different AP numbering ... don't use a #define
76 * for these numbers, use per-core armv7a state.
78 #define swjdp_memoryap 0
79 #define swjdp_debugap 1
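/* As written, the code assumes AP #0 is the AHB memory AP and AP #1 the
 * APB AP that exposes the CPU debug registers; that holds for OMAP3-class
 * parts but not necessarily elsewhere, hence the FIXME above. */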
82 * Cortex-A8 basic debug access; very low level, assumes state is saved
84 static int cortex_a8_init_debug_access(struct target *target)
86 struct armv7a_common *armv7a = target_to_armv7a(target);
87 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
88 int retval;
89 uint32_t dummy;
91 LOG_DEBUG(" ");
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
96 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
97 if (retval != ERROR_OK)
99 /* try again */
100 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
101 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
102 if (retval == ERROR_OK)
104 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
107 if (retval != ERROR_OK)
108 return retval;
109 /* Clear Sticky Power Down status Bit in PRSR to enable access to
110 the registers in the Core Power Domain */
111 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
112 armv7a->debug_base + CPUDBG_PRSR, &dummy);
113 if (retval != ERROR_OK)
114 return retval;
116 /* Enabling of instruction execution in debug mode is done in debug_entry code */
118 /* Resync breakpoint registers */
120 /* Since this is likely called from init or reset, update target state information*/
121 return cortex_a8_poll(target);
124 /* To reduce needless round-trips, pass in a pointer to the current
125 * DSCR value. Initialize it to zero if you just need to know the
126 * value on return from this function; or DSCR_INSTR_COMP if you
127 * happen to know that no instruction is pending.
129 static int cortex_a8_exec_opcode(struct target *target,
130 uint32_t opcode, uint32_t *dscr_p)
132 uint32_t dscr;
133 int retval;
134 struct armv7a_common *armv7a = target_to_armv7a(target);
135 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
137 dscr = dscr_p ? *dscr_p : 0;
139 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
141 /* Wait for InstrCompl bit to be set */
142 long long then = timeval_ms();
143 while ((dscr & DSCR_INSTR_COMP) == 0)
145 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
146 armv7a->debug_base + CPUDBG_DSCR, &dscr);
147 if (retval != ERROR_OK)
149 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
150 return retval;
152 if (timeval_ms() > then + 1000)
154 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
155 return ERROR_FAIL;
159 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
160 armv7a->debug_base + CPUDBG_ITR, opcode);
161 if (retval != ERROR_OK)
162 return retval;
164 then = timeval_ms();
167 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
168 armv7a->debug_base + CPUDBG_DSCR, &dscr);
169 if (retval != ERROR_OK)
171 LOG_ERROR("Could not read DSCR register");
172 return retval;
174 if (timeval_ms() > then + 1000)
176 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
177 return ERROR_FAIL;
180 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
182 if (dscr_p)
183 *dscr_p = dscr;
185 return retval;
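
#if 0
/* Illustrative only (not part of the original file): a minimal sketch of
 * the dscr_p protocol described above.  Seeding dscr with DSCR_INSTR_COMP
 * tells the first call that no instruction is pending, so it can skip the
 * initial DSCR read; the value handed back is reused for the next call. */
static int example_exec_two_opcodes(struct target *target)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* ISB / prefetch flush, an opcode already used elsewhere in this file */
	retval = cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4), &dscr);
	if (retval != ERROR_OK)
		return retval;
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4), &dscr);
}
#endif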
188 /**************************************************************************
189 Read the core registers with very few exec_opcode calls; fast, but needs a work_area.
190 This can cause problems with the MMU active.
191 **************************************************************************/
192 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
193 uint32_t * regfile)
195 int retval = ERROR_OK;
196 struct armv7a_common *armv7a = target_to_armv7a(target);
197 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
199 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
200 if (retval != ERROR_OK)
201 return retval;
202 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
203 if (retval != ERROR_OK)
204 return retval;
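/* register list 0xFFFE selects r1-r15: "STMIA r0, {r1-r15}" dumps the
 * remaining core registers to the work area so they can be fetched back
 * in the single burst read below (r0 itself was read via the DCC above) */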
205 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
206 if (retval != ERROR_OK)
207 return retval;
209 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
210 (uint8_t *)(&regfile[1]), 4*15, address);
212 return retval;
215 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
216 uint32_t *value, int regnum)
218 int retval = ERROR_OK;
219 uint8_t reg = regnum&0xFF;
220 uint32_t dscr = 0;
221 struct armv7a_common *armv7a = target_to_armv7a(target);
222 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
224 if (reg > 17)
225 return retval;
227 if (reg < 15)
229 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
230 retval = cortex_a8_exec_opcode(target,
231 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
232 &dscr);
233 if (retval != ERROR_OK)
234 return retval;
236 else if (reg == 15)
238 /* "MOV r0, r15"; then move r0 to DCCTX */
239 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
240 if (retval != ERROR_OK)
241 return retval;
242 retval = cortex_a8_exec_opcode(target,
243 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
244 &dscr);
245 if (retval != ERROR_OK)
246 return retval;
248 else
250 /* "MRS r0, CPSR" or "MRS r0, SPSR"
251 * then move r0 to DCCTX
253 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
254 if (retval != ERROR_OK)
255 return retval;
256 retval = cortex_a8_exec_opcode(target,
257 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
258 &dscr);
259 if (retval != ERROR_OK)
260 return retval;
263 /* Wait for DTRTXfull, then read DTRTX */
264 long long then = timeval_ms();
265 while ((dscr & DSCR_DTR_TX_FULL) == 0)
267 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
268 armv7a->debug_base + CPUDBG_DSCR, &dscr);
269 if (retval != ERROR_OK)
270 return retval;
271 if (timeval_ms() > then + 1000)
273 LOG_ERROR("Timeout waiting for DTRTXfull");
274 return ERROR_FAIL;
278 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
279 armv7a->debug_base + CPUDBG_DTRTX, value);
280 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
282 return retval;
285 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
286 uint32_t value, int regnum)
288 int retval = ERROR_OK;
289 uint8_t Rd = regnum&0xFF;
290 uint32_t dscr;
291 struct armv7a_common *armv7a = target_to_armv7a(target);
292 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
294 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
296 /* Check that DCCRX is not full */
297 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
298 armv7a->debug_base + CPUDBG_DSCR, &dscr);
299 if (retval != ERROR_OK)
300 return retval;
301 if (dscr & DSCR_DTR_RX_FULL)
303 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
304 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
305 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
311 if (Rd > 17)
312 return retval;
314 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
315 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
316 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
317 armv7a->debug_base + CPUDBG_DTRRX, value);
318 if (retval != ERROR_OK)
319 return retval;
321 if (Rd < 15)
323 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
324 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
325 &dscr);
327 if (retval != ERROR_OK)
328 return retval;
330 else if (Rd == 15)
332 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
333 * then "mov r15, r0"
335 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
336 &dscr);
337 if (retval != ERROR_OK)
338 return retval;
339 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
340 if (retval != ERROR_OK)
341 return retval;
343 else
345 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
346 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
348 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
349 &dscr);
350 if (retval != ERROR_OK)
351 return retval;
352 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
353 &dscr);
354 if (retval != ERROR_OK)
355 return retval;
357 /* "Prefetch flush" after modifying execution status in CPSR */
358 if (Rd == 16)
360 retval = cortex_a8_exec_opcode(target,
361 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
362 &dscr);
363 if (retval != ERROR_OK)
364 return retval;
368 return retval;
371 /* Write to memory mapped registers directly with no cache or mmu handling */
372 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
374 int retval;
375 struct armv7a_common *armv7a = target_to_armv7a(target);
376 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
378 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
380 return retval;
384 * Cortex-A8 implementation of Debug Programmer's Model
386 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
387 * so there's no need to poll for it before executing an instruction.
389 * NOTE that in several of these cases the "stall" mode might be useful.
390 * It'd let us queue a few operations together... prepare/finish might
391 * be the places to enable/disable that mode.
394 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
396 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
399 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
401 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
402 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
403 swjdp_debugap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
406 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
407 uint32_t *dscr_p)
409 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
410 uint32_t dscr = DSCR_INSTR_COMP;
411 int retval;
413 if (dscr_p)
414 dscr = *dscr_p;
416 /* Wait for DTRTXfull */
417 long long then = timeval_ms();
418 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
419 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
420 a8->armv7a_common.debug_base + CPUDBG_DSCR,
421 &dscr);
422 if (retval != ERROR_OK)
423 return retval;
424 if (timeval_ms() > then + 1000)
426 LOG_ERROR("Timeout waiting for read dcc");
427 return ERROR_FAIL;
431 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
432 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
433 if (retval != ERROR_OK)
434 return retval;
435 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
437 if (dscr_p)
438 *dscr_p = dscr;
440 return retval;
443 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
445 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
446 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
447 uint32_t dscr;
448 int retval;
450 /* set up invariant: INSTR_COMP is set after every DPM operation */
451 long long then = timeval_ms();
452 for (;;)
454 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
455 a8->armv7a_common.debug_base + CPUDBG_DSCR,
456 &dscr);
457 if (retval != ERROR_OK)
458 return retval;
459 if ((dscr & DSCR_INSTR_COMP) != 0)
460 break;
461 if (timeval_ms() > then + 1000)
463 LOG_ERROR("Timeout waiting for dpm prepare");
464 return ERROR_FAIL;
468 /* this "should never happen" ... */
469 if (dscr & DSCR_DTR_RX_FULL) {
470 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
471 /* Clear DCCRX */
472 retval = cortex_a8_exec_opcode(
473 a8->armv7a_common.armv4_5_common.target,
474 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
475 &dscr);
476 if (retval != ERROR_OK)
477 return retval;
480 return retval;
483 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
485 /* REVISIT what could be done here? */
486 return ERROR_OK;
489 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
490 uint32_t opcode, uint32_t data)
492 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
493 int retval;
494 uint32_t dscr = DSCR_INSTR_COMP;
496 retval = cortex_a8_write_dcc(a8, data);
497 if (retval != ERROR_OK)
498 return retval;
500 return cortex_a8_exec_opcode(
501 a8->armv7a_common.armv4_5_common.target,
502 opcode,
503 &dscr);
506 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
507 uint32_t opcode, uint32_t data)
509 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
510 uint32_t dscr = DSCR_INSTR_COMP;
511 int retval;
513 retval = cortex_a8_write_dcc(a8, data);
514 if (retval != ERROR_OK)
515 return retval;
517 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
518 retval = cortex_a8_exec_opcode(
519 a8->armv7a_common.armv4_5_common.target,
520 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
521 &dscr);
522 if (retval != ERROR_OK)
523 return retval;
525 /* then the opcode, taking data from R0 */
526 retval = cortex_a8_exec_opcode(
527 a8->armv7a_common.armv4_5_common.target,
528 opcode,
529 &dscr);
531 return retval;
534 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
536 struct target *target = dpm->arm->target;
537 uint32_t dscr = DSCR_INSTR_COMP;
539 /* "Prefetch flush" after modifying execution status in CPSR */
540 return cortex_a8_exec_opcode(target,
541 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
542 &dscr);
545 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
546 uint32_t opcode, uint32_t *data)
548 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
549 int retval;
550 uint32_t dscr = DSCR_INSTR_COMP;
552 /* the opcode, writing data to DCC */
553 retval = cortex_a8_exec_opcode(
554 a8->armv7a_common.armv4_5_common.target,
555 opcode,
556 &dscr);
557 if (retval != ERROR_OK)
558 return retval;
560 return cortex_a8_read_dcc(a8, data, &dscr);
564 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
565 uint32_t opcode, uint32_t *data)
567 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
568 uint32_t dscr = DSCR_INSTR_COMP;
569 int retval;
571 /* the opcode, writing data to R0 */
572 retval = cortex_a8_exec_opcode(
573 a8->armv7a_common.armv4_5_common.target,
574 opcode,
575 &dscr);
576 if (retval != ERROR_OK)
577 return retval;
579 /* write R0 to DCC */
580 retval = cortex_a8_exec_opcode(
581 a8->armv7a_common.armv4_5_common.target,
582 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
583 &dscr);
584 if (retval != ERROR_OK)
585 return retval;
587 return cortex_a8_read_dcc(a8, data, &dscr);
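
#if 0
/* Illustrative only (hypothetical helper, not part of the original file):
 * a sketch of how the instr_read_data_r0 hook above is meant to be driven.
 * arm_dpm builds its generic register accessors on exactly this pattern. */
static int example_read_sctlr(struct cortex_a8_common *a8, uint32_t *sctlr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;
	/* MRC p15, 0, r0, c1, c0, 0 ; the hook then moves r0 to the DCC */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), sctlr);
	/* (void) */ dpm->finish(dpm);
	return retval;
}
#endif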
590 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
591 uint32_t addr, uint32_t control)
593 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
594 uint32_t vr = a8->armv7a_common.debug_base;
595 uint32_t cr = a8->armv7a_common.debug_base;
596 int retval;
598 switch (index_t) {
599 case 0 ... 15: /* breakpoints */
600 vr += CPUDBG_BVR_BASE;
601 cr += CPUDBG_BCR_BASE;
602 break;
603 case 16 ... 31: /* watchpoints */
604 vr += CPUDBG_WVR_BASE;
605 cr += CPUDBG_WCR_BASE;
606 index_t -= 16;
607 break;
608 default:
609 return ERROR_FAIL;
611 vr += 4 * index_t;
612 cr += 4 * index_t;
614 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
615 (unsigned) vr, (unsigned) cr);
617 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
618 vr, addr);
619 if (retval != ERROR_OK)
620 return retval;
621 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
622 cr, control);
623 return retval;
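/* worked example of the index mapping above: index_t == 18 selects
 * watchpoint pair 2, so the address and control values land in WVR2/WCR2
 * at debug_base + CPUDBG_WVR_BASE/CPUDBG_WCR_BASE + 4 * 2 */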
626 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
628 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
629 uint32_t cr;
631 switch (index_t) {
632 case 0 ... 15:
633 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
634 break;
635 case 16 ... 31:
636 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
637 index_t -= 16;
638 break;
639 default:
640 return ERROR_FAIL;
642 cr += 4 * index_t;
644 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
646 /* clear control register */
647 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
650 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
652 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
653 int retval;
655 dpm->arm = &a8->armv7a_common.armv4_5_common;
656 dpm->didr = didr;
658 dpm->prepare = cortex_a8_dpm_prepare;
659 dpm->finish = cortex_a8_dpm_finish;
661 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
662 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
663 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
665 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
666 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
668 dpm->bpwp_enable = cortex_a8_bpwp_enable;
669 dpm->bpwp_disable = cortex_a8_bpwp_disable;
671 retval = arm_dpm_setup(dpm);
672 if (retval == ERROR_OK)
673 retval = arm_dpm_initialize(dpm);
675 return retval;
677 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
679 struct target_list *head;
680 struct target *curr;
682 head = target->head;
683 while(head != (struct target_list*)NULL)
685 curr = head->target;
686 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
688 return curr;
690 head = head->next;
692 return target;
694 static int cortex_a8_halt(struct target *target);
696 static int cortex_a8_halt_smp(struct target *target)
698 int retval = 0;
699 struct target_list *head;
700 struct target *curr;
701 head = target->head;
702 while(head != (struct target_list*)NULL)
704 curr = head->target;
705 if ((curr != target) && (curr->state!= TARGET_HALTED))
707 retval += cortex_a8_halt(curr);
709 head = head->next;
711 return retval;
714 static int update_halt_gdb(struct target *target)
716 int retval = 0;
717 if (target->gdb_service->core[0]==-1)
719 target->gdb_service->target = target;
720 target->gdb_service->core[0] = target->coreid;
721 retval += cortex_a8_halt_smp(target);
723 return retval;
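/* As used below: gdb_service->core[0] is the core id currently presented
 * to gdb (-1 when unset) and core[1] is the core id gdb asked to switch to
 * via the "J" maintenance packet; see cortex_a8_poll() and
 * cortex_a8_resume() for the fake halt/resume hand-off. */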
727 * Cortex-A8 Run control
730 static int cortex_a8_poll(struct target *target)
732 int retval = ERROR_OK;
733 uint32_t dscr;
734 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
735 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
736 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
737 enum target_state prev_target_state = target->state;
738 // toggling to another core is done by gdb as follows:
739 //   maint packet J core_id
740 //   continue
741 // the next poll triggers a halt event sent to gdb
742 if ((target->state == TARGET_HALTED) && (target->smp) &&
743 (target->gdb_service) &&
744 (target->gdb_service->target==NULL) )
746 target->gdb_service->target =
747 get_cortex_a8(target, target->gdb_service->core[1]);
748 target_call_event_callbacks(target,
749 TARGET_EVENT_HALTED);
750 return retval;
752 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
753 armv7a->debug_base + CPUDBG_DSCR, &dscr);
754 if (retval != ERROR_OK)
756 return retval;
758 cortex_a8->cpudbg_dscr = dscr;
760 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
762 if (prev_target_state != TARGET_HALTED)
764 /* We have a halting debug event */
765 LOG_DEBUG("Target halted");
766 target->state = TARGET_HALTED;
767 if ((prev_target_state == TARGET_RUNNING)
768 || (prev_target_state == TARGET_RESET))
770 retval = cortex_a8_debug_entry(target);
771 if (retval != ERROR_OK)
772 return retval;
773 if (target->smp)
775 retval = update_halt_gdb(target);
776 if (retval != ERROR_OK)
777 return retval;
779 target_call_event_callbacks(target,
780 TARGET_EVENT_HALTED);
782 if (prev_target_state == TARGET_DEBUG_RUNNING)
784 LOG_DEBUG(" ");
786 retval = cortex_a8_debug_entry(target);
787 if (retval != ERROR_OK)
788 return retval;
789 if (target->smp)
791 retval = update_halt_gdb(target);
792 if (retval != ERROR_OK)
793 return retval;
796 target_call_event_callbacks(target,
797 TARGET_EVENT_DEBUG_HALTED);
801 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
803 target->state = TARGET_RUNNING;
805 else
807 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
808 target->state = TARGET_UNKNOWN;
811 return retval;
814 static int cortex_a8_halt(struct target *target)
816 int retval = ERROR_OK;
817 uint32_t dscr;
818 struct armv7a_common *armv7a = target_to_armv7a(target);
819 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
822 * Tell the core to be halted by writing DRCR with 0x1
823 * and then wait for the core to be halted.
825 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
826 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
827 if (retval != ERROR_OK)
828 return retval;
831 * enter halting debug mode
833 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
834 armv7a->debug_base + CPUDBG_DSCR, &dscr);
835 if (retval != ERROR_OK)
836 return retval;
838 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
839 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
840 if (retval != ERROR_OK)
841 return retval;
843 long long then = timeval_ms();
844 for (;;)
846 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
847 armv7a->debug_base + CPUDBG_DSCR, &dscr);
848 if (retval != ERROR_OK)
849 return retval;
850 if ((dscr & DSCR_CORE_HALTED) != 0)
852 break;
854 if (timeval_ms() > then + 1000)
856 LOG_ERROR("Timeout waiting for halt");
857 return ERROR_FAIL;
861 target->debug_reason = DBG_REASON_DBGRQ;
863 return ERROR_OK;
866 static int cortex_a8_internal_restore(struct target *target, int current,
867 uint32_t *address, int handle_breakpoints, int debug_execution)
869 struct armv7a_common *armv7a = target_to_armv7a(target);
870 struct arm *armv4_5 = &armv7a->armv4_5_common;
871 int retval;
872 uint32_t resume_pc;
874 if (!debug_execution)
875 target_free_all_working_areas(target);
877 #if 0
878 if (debug_execution)
880 /* Disable interrupts */
881 /* We disable interrupts in the PRIMASK register instead of
882 * masking with C_MASKINTS,
883 * This is probably the same issue as Cortex-M3 Errata 377493:
884 * C_MASKINTS in parallel with disabled interrupts can cause
885 * local faults to not be taken. */
886 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
887 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
888 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
890 /* Make sure we are in Thumb mode */
891 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
892 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
893 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
894 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
896 #endif
898 /* current = 1: continue on current pc, otherwise continue at <address> */
899 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
900 if (!current)
901 resume_pc = *address;
902 else
903 *address = resume_pc;
905 /* Make sure that the ARMv7 gdb Thumb fixups do not
906 * kill the return address
908 switch (armv4_5->core_state)
910 case ARM_STATE_ARM:
911 resume_pc &= 0xFFFFFFFC;
912 break;
913 case ARM_STATE_THUMB:
914 case ARM_STATE_THUMB_EE:
915 /* When the return address is loaded into PC
916 * bit 0 must be 1 to stay in Thumb state
918 resume_pc |= 0x1;
919 break;
920 case ARM_STATE_JAZELLE:
921 LOG_ERROR("How do I resume into Jazelle state??");
922 return ERROR_FAIL;
924 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
925 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
926 armv4_5->pc->dirty = 1;
927 armv4_5->pc->valid = 1;
929 retval = cortex_a8_restore_context(target, handle_breakpoints);
930 if (retval != ERROR_OK)
931 return retval;
932 target->debug_reason = DBG_REASON_NOTHALTED;
933 target->state = TARGET_RUNNING;
935 /* registers are now invalid */
936 register_cache_invalidate(armv4_5->core_cache);
938 #if 0
939 /* the front-end may request us not to handle breakpoints */
940 if (handle_breakpoints)
942 /* Single step past breakpoint at current address */
943 if ((breakpoint = breakpoint_find(target, resume_pc)))
945 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
946 cortex_m3_unset_breakpoint(target, breakpoint);
947 cortex_m3_single_step_core(target);
948 cortex_m3_set_breakpoint(target, breakpoint);
952 #endif
953 return retval;
956 static int cortex_a8_internal_restart(struct target *target)
958 struct armv7a_common *armv7a = target_to_armv7a(target);
959 struct arm *armv4_5 = &armv7a->armv4_5_common;
960 struct adiv5_dap *swjdp = armv4_5->dap;
961 int retval;
962 uint32_t dscr;
964 * Restart core and wait for it to be started. Clear ITRen and sticky
965 * exception flags: see ARMv7 ARM, C5.9.
967 * REVISIT: for single stepping, we probably want to
968 * disable IRQs by default, with optional override...
971 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
972 armv7a->debug_base + CPUDBG_DSCR, &dscr);
973 if (retval != ERROR_OK)
974 return retval;
976 if ((dscr & DSCR_INSTR_COMP) == 0)
977 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
979 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
980 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
981 if (retval != ERROR_OK)
982 return retval;
984 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
985 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
986 DRCR_CLEAR_EXCEPTIONS);
987 if (retval != ERROR_OK)
988 return retval;
990 long long then = timeval_ms();
991 for (;;)
993 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
994 armv7a->debug_base + CPUDBG_DSCR, &dscr);
995 if (retval != ERROR_OK)
996 return retval;
997 if ((dscr & DSCR_CORE_RESTARTED) != 0)
998 break;
999 if (timeval_ms() > then + 1000)
1001 LOG_ERROR("Timeout waiting for resume");
1002 return ERROR_FAIL;
1006 target->debug_reason = DBG_REASON_NOTHALTED;
1007 target->state = TARGET_RUNNING;
1009 /* registers are now invalid */
1010 register_cache_invalidate(armv4_5->core_cache);
1012 return ERROR_OK;
1015 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1017 int retval = 0;
1018 struct target_list *head;
1019 struct target *curr;
1020 uint32_t address;
1021 head = target->head;
1022 while(head != (struct target_list*)NULL)
1024 curr = head->target;
1025 if ((curr != target) && (curr->state != TARGET_RUNNING))
1027 /* resume at the current address, not in step mode */
1028 retval += cortex_a8_internal_restore(curr, 1, &address,
1029 handle_breakpoints, 0);
1030 retval += cortex_a8_internal_restart(curr);
1032 head = head->next;
1035 return retval;
1038 static int cortex_a8_resume(struct target *target, int current,
1039 uint32_t address, int handle_breakpoints, int debug_execution)
1041 int retval = 0;
1042 /* dummy resume for smp toggle in order to reduce gdb impact */
1043 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1045 /* simulate a start and halt of target */
1046 target->gdb_service->target = NULL;
1047 target->gdb_service->core[0] = target->gdb_service->core[1];
1048 /* fake resume: at the next poll we present target core[1], see poll() */
1049 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1050 return 0;
1052 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1053 if (target->smp)
1054 { target->gdb_service->core[0] = -1;
1055 retval += cortex_a8_restore_smp(target, handle_breakpoints);
1057 cortex_a8_internal_restart(target);
1059 if (!debug_execution)
1061 target->state = TARGET_RUNNING;
1062 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1063 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1065 else
1067 target->state = TARGET_DEBUG_RUNNING;
1068 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1069 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1072 return ERROR_OK;
1075 static int cortex_a8_debug_entry(struct target *target)
1077 int i;
1078 uint32_t regfile[16], cpsr, dscr;
1079 int retval = ERROR_OK;
1080 struct working_area *regfile_working_area = NULL;
1081 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1082 struct armv7a_common *armv7a = target_to_armv7a(target);
1083 struct arm *armv4_5 = &armv7a->armv4_5_common;
1084 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1085 struct reg *reg;
1087 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1089 /* REVISIT surely we should not re-read DSCR !! */
1090 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1091 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1092 if (retval != ERROR_OK)
1093 return retval;
1095 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1096 * imprecise data aborts get discarded by issuing a Data
1097 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1100 /* Enable the ITR execution once we are in debug mode */
1101 dscr |= DSCR_ITR_EN;
1102 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1103 armv7a->debug_base + CPUDBG_DSCR, dscr);
1104 if (retval != ERROR_OK)
1105 return retval;
1107 /* Examine debug reason */
1108 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1110 /* save address of instruction that triggered the watchpoint? */
1111 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1112 uint32_t wfar;
1114 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1115 armv7a->debug_base + CPUDBG_WFAR,
1116 &wfar);
1117 if (retval != ERROR_OK)
1118 return retval;
1119 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1122 /* REVISIT fast_reg_read is never set ... */
1124 /* Examine target state and mode */
1125 if (cortex_a8->fast_reg_read)
1126 target_alloc_working_area(target, 64, &regfile_working_area);
1128 /* First load the registers accessible through the core debug port */
1129 if (!regfile_working_area)
1131 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1133 else
1135 retval = cortex_a8_read_regs_through_mem(target,
1136 regfile_working_area->address, regfile);
1138 target_free_working_area(target, regfile_working_area);
1139 if (retval != ERROR_OK)
1141 return retval;
1144 /* read Current PSR */
1145 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1146 if (retval != ERROR_OK)
1147 return retval;
1149 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1151 arm_set_cpsr(armv4_5, cpsr);
1153 /* update cache */
1154 for (i = 0; i <= ARM_PC; i++)
1156 reg = arm_reg_current(armv4_5, i);
1158 buf_set_u32(reg->value, 0, 32, regfile[i]);
1159 reg->valid = 1;
1160 reg->dirty = 0;
1163 /* Fixup PC Resume Address */
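/* the PC sampled in debug state points ahead of the instruction to return
 * to: by 8 bytes in ARM state and 4 in Thumb/ThumbEE, hence the
 * corrections below */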
1164 if (cpsr & (1 << 5))
1166 // T bit set for Thumb or ThumbEE state
1167 regfile[ARM_PC] -= 4;
1169 else
1171 // ARM state
1172 regfile[ARM_PC] -= 8;
1175 reg = armv4_5->pc;
1176 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1177 reg->dirty = reg->valid;
1180 #if 0
1181 /* TODO, Move this */
1182 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1183 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1184 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1186 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1187 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1189 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1190 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1191 #endif
1193 /* Are we in an exception handler */
1194 // armv4_5->exception_number = 0;
1195 if (armv7a->post_debug_entry)
1197 retval = armv7a->post_debug_entry(target);
1198 if (retval != ERROR_OK)
1199 return retval;
1202 return retval;
1205 static int cortex_a8_post_debug_entry(struct target *target)
1207 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1208 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1209 int retval;
1211 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1212 retval = armv7a->armv4_5_common.mrc(target, 15,
1213 0, 0, /* op1, op2 */
1214 1, 0, /* CRn, CRm */
1215 &cortex_a8->cp15_control_reg);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1220 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1222 uint32_t cache_type_reg;
1224 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1225 retval = armv7a->armv4_5_common.mrc(target, 15,
1226 0, 1, /* op1, op2 */
1227 0, 0, /* CRn, CRm */
1228 &cache_type_reg);
1229 if (retval != ERROR_OK)
1230 return retval;
1231 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1233 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1234 armv4_5_identify_cache(cache_type_reg,
1235 &armv7a->armv4_5_mmu.armv4_5_cache);
1238 armv7a->armv4_5_mmu.mmu_enabled =
1239 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1240 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1241 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1242 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1243 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1245 return ERROR_OK;
1248 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1249 int handle_breakpoints)
1251 struct armv7a_common *armv7a = target_to_armv7a(target);
1252 struct arm *armv4_5 = &armv7a->armv4_5_common;
1253 struct breakpoint *breakpoint = NULL;
1254 struct breakpoint stepbreakpoint;
1255 struct reg *r;
1256 int retval;
1258 if (target->state != TARGET_HALTED)
1260 LOG_WARNING("target not halted");
1261 return ERROR_TARGET_NOT_HALTED;
1264 /* current = 1: continue on current pc, otherwise continue at <address> */
1265 r = armv4_5->pc;
1266 if (!current)
1268 buf_set_u32(r->value, 0, 32, address);
1270 else
1272 address = buf_get_u32(r->value, 0, 32);
1275 /* The front-end may request us not to handle breakpoints.
1276 * But since the Cortex-A8 uses a hardware breakpoint for single stepping,
1277 * we MUST handle breakpoints.
1279 handle_breakpoints = 1;
1280 if (handle_breakpoints) {
1281 breakpoint = breakpoint_find(target, address);
1282 if (breakpoint)
1283 cortex_a8_unset_breakpoint(target, breakpoint);
1286 /* Setup single step breakpoint */
1287 stepbreakpoint.address = address;
1288 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1289 ? 2 : 4;
1290 stepbreakpoint.type = BKPT_HARD;
1291 stepbreakpoint.set = 0;
1293 /* Break on IVA mismatch */
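/* matchmode 0x4 programs the breakpoint in address-mismatch mode, so the
 * core halts at the first fetch from any address other than the one set
 * here, i.e. right after executing a single instruction */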
1294 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1296 target->debug_reason = DBG_REASON_SINGLESTEP;
1298 retval = cortex_a8_resume(target, 1, address, 0, 0);
1299 if (retval != ERROR_OK)
1300 return retval;
1302 long long then = timeval_ms();
1303 while (target->state != TARGET_HALTED)
1305 retval = cortex_a8_poll(target);
1306 if (retval != ERROR_OK)
1307 return retval;
1308 if (timeval_ms() > then + 1000)
1310 LOG_ERROR("timeout waiting for target halt");
1311 return ERROR_FAIL;
1315 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1317 target->debug_reason = DBG_REASON_BREAKPOINT;
1319 if (breakpoint)
1320 cortex_a8_set_breakpoint(target, breakpoint, 0);
1322 if (target->state != TARGET_HALTED)
1323 LOG_DEBUG("target stepped");
1325 return ERROR_OK;
1328 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1330 struct armv7a_common *armv7a = target_to_armv7a(target);
1332 LOG_DEBUG(" ");
1334 if (armv7a->pre_restore_context)
1335 armv7a->pre_restore_context(target);
1337 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1342 * Cortex-A8 Breakpoint and watchpoint functions
1345 /* Setup hardware Breakpoint Register Pair */
1346 static int cortex_a8_set_breakpoint(struct target *target,
1347 struct breakpoint *breakpoint, uint8_t matchmode)
1349 int retval;
1350 int brp_i=0;
1351 uint32_t control;
1352 uint8_t byte_addr_select = 0x0F;
1353 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1354 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1355 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1357 if (breakpoint->set)
1359 LOG_WARNING("breakpoint already set");
1360 return ERROR_OK;
1363 if (breakpoint->type == BKPT_HARD)
1365 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1366 brp_i++ ;
1367 if (brp_i >= cortex_a8->brp_num)
1369 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1372 breakpoint->set = brp_i + 1;
1373 if (breakpoint->length == 2)
1375 byte_addr_select = (3 << (breakpoint->address & 0x02));
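/* BCR layout used below: match mode in bits [22:20], byte address select
 * in bits [8:5] (only the addressed half-word for a 2-byte Thumb
 * breakpoint), (3 << 1) enables matching in both privileged and user
 * modes, and bit 0 is the enable bit */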
1377 control = ((matchmode & 0x7) << 20)
1378 | (byte_addr_select << 5)
1379 | (3 << 1) | 1;
1380 brp_list[brp_i].used = 1;
1381 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1382 brp_list[brp_i].control = control;
1383 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1384 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1385 brp_list[brp_i].value);
1386 if (retval != ERROR_OK)
1387 return retval;
1388 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1389 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1390 brp_list[brp_i].control);
1391 if (retval != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1394 brp_list[brp_i].control,
1395 brp_list[brp_i].value);
1397 else if (breakpoint->type == BKPT_SOFT)
1399 uint8_t code[4];
1400 if (breakpoint->length == 2)
1402 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1404 else
1406 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1408 retval = target->type->read_memory(target,
1409 breakpoint->address & 0xFFFFFFFE,
1410 breakpoint->length, 1,
1411 breakpoint->orig_instr);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 retval = target->type->write_memory(target,
1415 breakpoint->address & 0xFFFFFFFE,
1416 breakpoint->length, 1, code);
1417 if (retval != ERROR_OK)
1418 return retval;
1419 breakpoint->set = 0x11; /* Any nice value but 0 */
1422 return ERROR_OK;
1425 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1427 int retval;
1428 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1429 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1430 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1432 if (!breakpoint->set)
1434 LOG_WARNING("breakpoint not set");
1435 return ERROR_OK;
1438 if (breakpoint->type == BKPT_HARD)
1440 int brp_i = breakpoint->set - 1;
1441 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1443 LOG_DEBUG("Invalid BRP number in breakpoint");
1444 return ERROR_OK;
1446 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1447 brp_list[brp_i].control, brp_list[brp_i].value);
1448 brp_list[brp_i].used = 0;
1449 brp_list[brp_i].value = 0;
1450 brp_list[brp_i].control = 0;
1451 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1452 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1453 brp_list[brp_i].control);
1454 if (retval != ERROR_OK)
1455 return retval;
1456 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1457 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1458 brp_list[brp_i].value);
1459 if (retval != ERROR_OK)
1460 return retval;
1462 else
1464 /* restore original instruction (kept in target endianness) */
1465 if (breakpoint->length == 4)
1467 retval = target->type->write_memory(target,
1468 breakpoint->address & 0xFFFFFFFE,
1469 4, 1, breakpoint->orig_instr);
1470 if (retval != ERROR_OK)
1471 return retval;
1473 else
1475 retval = target->type->write_memory(target,
1476 breakpoint->address & 0xFFFFFFFE,
1477 2, 1, breakpoint->orig_instr);
1478 if (retval != ERROR_OK)
1479 return retval;
1482 breakpoint->set = 0;
1484 return ERROR_OK;
1487 static int cortex_a8_add_breakpoint(struct target *target,
1488 struct breakpoint *breakpoint)
1490 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1492 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1494 LOG_INFO("no hardware breakpoint available");
1495 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1498 if (breakpoint->type == BKPT_HARD)
1499 cortex_a8->brp_num_available--;
1501 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1504 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1506 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1508 #if 0
1509 /* It is perfectly possible to remove breakpoints while the target is running */
1510 if (target->state != TARGET_HALTED)
1512 LOG_WARNING("target not halted");
1513 return ERROR_TARGET_NOT_HALTED;
1515 #endif
1517 if (breakpoint->set)
1519 cortex_a8_unset_breakpoint(target, breakpoint);
1520 if (breakpoint->type == BKPT_HARD)
1521 cortex_a8->brp_num_available++ ;
1525 return ERROR_OK;
1531 * Cortex-A8 Reset functions
1534 static int cortex_a8_assert_reset(struct target *target)
1536 struct armv7a_common *armv7a = target_to_armv7a(target);
1538 LOG_DEBUG(" ");
1540 /* FIXME when halt is requested, make it work somehow... */
1542 /* Issue some kind of warm reset. */
1543 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1544 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1545 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1546 /* REVISIT handle "pulls" cases, if there's
1547 * hardware that needs them to work.
1549 jtag_add_reset(0, 1);
1550 } else {
1551 LOG_ERROR("%s: how to reset?", target_name(target));
1552 return ERROR_FAIL;
1555 /* registers are now invalid */
1556 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1558 target->state = TARGET_RESET;
1560 return ERROR_OK;
1563 static int cortex_a8_deassert_reset(struct target *target)
1565 int retval;
1567 LOG_DEBUG(" ");
1569 /* be certain SRST is off */
1570 jtag_add_reset(0, 0);
1572 retval = cortex_a8_poll(target);
1573 if (retval != ERROR_OK)
1574 return retval;
1576 if (target->reset_halt) {
1577 if (target->state != TARGET_HALTED) {
1578 LOG_WARNING("%s: ran after reset and before halt ...",
1579 target_name(target));
1580 if ((retval = target_halt(target)) != ERROR_OK)
1581 return retval;
1585 return ERROR_OK;
1589 static int cortex_a8_write_apb_ab_memory(struct target *target,
1590 uint32_t address, uint32_t size,
1591 uint32_t count, const uint8_t *buffer)
1594 /* write memory through APB-AP */
1596 int retval = ERROR_INVALID_ARGUMENTS;
1597 struct armv7a_common *armv7a = target_to_armv7a(target);
1598 struct arm *armv4_5 = &armv7a->armv4_5_common;
1599 int total_bytes = count * size;
1600 int start_byte, nbytes_to_write, i;
1601 struct reg *reg;
1602 union _data {
1603 uint8_t uc_a[4];
1604 uint32_t ui;
1605 } data;
1607 if (target->state != TARGET_HALTED)
1609 LOG_WARNING("target not halted");
1610 return ERROR_TARGET_NOT_HALTED;
1613 reg = arm_reg_current(armv4_5, 0);
1614 reg->dirty = 1;
1615 reg = arm_reg_current(armv4_5, 1);
1616 reg->dirty = 1;
1618 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1619 if (retval != ERROR_OK)
1620 return retval;
1622 start_byte = address & 0x3;
1624 while (total_bytes > 0) {
1626 nbytes_to_write = 4 - start_byte;
1627 if (total_bytes < nbytes_to_write)
1628 nbytes_to_write = total_bytes;
1630 if ( nbytes_to_write != 4 ) {
1632 /* execute instruction LDR r1, [r0] */
1633 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
1634 if (retval != ERROR_OK)
1635 return retval;
1637 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1638 if (retval != ERROR_OK)
1639 return retval;
1642 for (i = 0; i < nbytes_to_write; ++i)
1643 data.uc_a[i + start_byte] = *buffer++;
1645 retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
1646 if (retval != ERROR_OK)
1647 return retval;
1649 /* execute instruction STR r1, [r0], #4 (0xe4801004) */
1650 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
1651 if (retval != ERROR_OK)
1652 return retval;
1654 total_bytes -= nbytes_to_write;
1655 start_byte = 0;
1658 return retval;
1662 static int cortex_a8_read_apb_ab_memory(struct target *target,
1663 uint32_t address, uint32_t size,
1664 uint32_t count, uint8_t *buffer)
1667 /* read memory through APB-AP */
1669 int retval = ERROR_INVALID_ARGUMENTS;
1670 struct armv7a_common *armv7a = target_to_armv7a(target);
1671 struct arm *armv4_5 = &armv7a->armv4_5_common;
1672 int total_bytes = count * size;
1673 int start_byte, nbytes_to_read, i;
1674 struct reg *reg;
1675 union _data {
1676 uint8_t uc_a[4];
1677 uint32_t ui;
1678 } data;
1680 if (target->state != TARGET_HALTED)
1682 LOG_WARNING("target not halted");
1683 return ERROR_TARGET_NOT_HALTED;
1686 reg = arm_reg_current(armv4_5, 0);
1687 reg->dirty = 1;
1688 reg = arm_reg_current(armv4_5, 1);
1689 reg->dirty = 1;
1691 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1692 if (retval != ERROR_OK)
1693 return retval;
1695 start_byte = address & 0x3;
1697 while (total_bytes > 0) {
1699 /* execute instruction LDR r1, [r0], #4 (0xe4901004) */
1700 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
1701 if (retval != ERROR_OK)
1702 return retval;
1704 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1705 if (retval != ERROR_OK)
1706 return retval;
1708 nbytes_to_read = 4 - start_byte;
1709 if (total_bytes < nbytes_to_read)
1710 nbytes_to_read = total_bytes;
1712 for (i = 0; i < nbytes_to_read; ++i)
1713 *buffer++ = data.uc_a[i + start_byte];
1715 total_bytes -= nbytes_to_read;
1716 start_byte = 0;
1719 return retval;
1725 * Cortex-A8 Memory access
1727 * This is the same as for the Cortex-M3, but we must also use the
1728 * correct AP number for every access.
1731 static int cortex_a8_read_phys_memory(struct target *target,
1732 uint32_t address, uint32_t size,
1733 uint32_t count, uint8_t *buffer)
1735 struct armv7a_common *armv7a = target_to_armv7a(target);
1736 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1737 int retval = ERROR_INVALID_ARGUMENTS;
1738 uint8_t apsel = swjdp->apsel;
1739 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1740 address, size, count);
1742 if (count && buffer) {
1744 if ( apsel == swjdp_memoryap ) {
1746 /* read memory through AHB-AP */
1748 switch (size) {
1749 case 4:
1750 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1751 buffer, 4 * count, address);
1752 break;
1753 case 2:
1754 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1755 buffer, 2 * count, address);
1756 break;
1757 case 1:
1758 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1759 buffer, count, address);
1760 break;
1762 } else {
1764 /* read memory through APB-AP */
1765 int enabled = 0;
1767 retval = cortex_a8_mmu(target, &enabled);
1768 if (retval != ERROR_OK)
1769 return retval;
1771 if (enabled)
1773 LOG_WARNING("Reading physical memory through \
1774 APB with MMU enabled is not yet implemented");
1775 return ERROR_TARGET_FAILURE;
1777 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1780 return retval;
1783 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1784 uint32_t size, uint32_t count, uint8_t *buffer)
1786 int enabled = 0;
1787 uint32_t virt, phys;
1788 int retval;
1789 struct armv7a_common *armv7a = target_to_armv7a(target);
1790 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1791 uint8_t apsel = swjdp->apsel;
1793 /* cortex_a8 handles unaligned memory access */
1794 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1795 size, count);
1796 if (apsel == swjdp_memoryap) {
1797 retval = cortex_a8_mmu(target, &enabled);
1798 if (retval != ERROR_OK)
1799 return retval;
1802 if (enabled)
1804 virt = address;
1805 retval = cortex_a8_virt2phys(target, virt, &phys);
1806 if (retval != ERROR_OK)
1807 return retval;
1809 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1810 virt, phys);
1811 address = phys;
1813 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
1814 } else {
1815 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1817 return retval;
1820 static int cortex_a8_write_phys_memory(struct target *target,
1821 uint32_t address, uint32_t size,
1822 uint32_t count, const uint8_t *buffer)
1824 struct armv7a_common *armv7a = target_to_armv7a(target);
1825 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1826 int retval = ERROR_INVALID_ARGUMENTS;
1827 uint8_t apsel = swjdp->apsel;
1829 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
1830 size, count);
1832 if (count && buffer) {
1834 if ( apsel == swjdp_memoryap ) {
1836 /* write memory through AHB-AP */
1838 switch (size) {
1839 case 4:
1840 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1841 buffer, 4 * count, address);
1842 break;
1843 case 2:
1844 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1845 buffer, 2 * count, address);
1846 break;
1847 case 1:
1848 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1849 buffer, count, address);
1850 break;
1853 } else {
1855 /* write memory through APB-AP */
1856 int enabled = 0;
1858 retval = cortex_a8_mmu(target, &enabled);
1859 if (retval != ERROR_OK)
1860 return retval;
1862 if (enabled)
1864 LOG_WARNING("Writing physical memory through APB with MMU" \
1865 "enabled is not yet implemented");
1866 return ERROR_TARGET_FAILURE;
1868 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1873 /* REVISIT this op is generic ARMv7-A/R stuff */
1874 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1876 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1878 retval = dpm->prepare(dpm);
1879 if (retval != ERROR_OK)
1880 return retval;
1882 /* The Cache handling will NOT work with MMU active, the
1883 * wrong addresses will be invalidated!
1885 * For both ICache and DCache, walk all cache lines in the
1886 * address range. Cortex-A8 has fixed 64 byte line length.
1888 * REVISIT per ARMv7, these may trigger watchpoints ...
1891 /* invalidate I-Cache */
1892 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1894 /* ICIMVAU - Invalidate Cache single entry
1895 * with MVA to PoU
1896 * MCR p15, 0, r0, c7, c5, 1
1898 for (uint32_t cacheline = address & ~(uint32_t)63; /* align down so the line holding the last byte is covered */
1899 cacheline < address + size * count;
1900 cacheline += 64) {
1901 retval = dpm->instr_write_data_r0(dpm,
1902 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1903 cacheline);
1904 if (retval != ERROR_OK)
1905 return retval;
1909 /* invalidate D-Cache */
1910 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1912 /* DCIMVAC - Invalidate data Cache line
1913 * with MVA to PoC
1914 * MCR p15, 0, r0, c7, c6, 1
1916 for (uint32_t cacheline = address & ~(uint32_t)63; /* align down so the line holding the last byte is covered */
1917 cacheline < address + size * count;
1918 cacheline += 64) {
1919 retval = dpm->instr_write_data_r0(dpm,
1920 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1921 cacheline);
1922 if (retval != ERROR_OK)
1923 return retval;
1927 /* (void) */ dpm->finish(dpm);
1930 return retval;
1933 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1934 uint32_t size, uint32_t count, const uint8_t *buffer)
1936 int enabled = 0;
1937 uint32_t virt, phys;
1938 int retval;
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1941 uint8_t apsel = swjdp->apsel;
1942 /* cortex_a8 handles unaligned memory access */
1943 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1944 size, count);
1945 if (apsel == swjdp_memoryap) {
1947 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1948 retval = cortex_a8_mmu(target, &enabled);
1949 if (retval != ERROR_OK)
1950 return retval;
1952 if (enabled)
1954 virt = address;
1955 retval = cortex_a8_virt2phys(target, virt, &phys);
1956 if (retval != ERROR_OK)
1957 return retval;
1958 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1959 address = phys;
1962 retval = cortex_a8_write_phys_memory(target, address, size,
1963 count, buffer);
1965 else {
1966 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1968 return retval;
1971 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1972 uint32_t count, const uint8_t *buffer)
1974 return cortex_a8_write_memory(target, address, 4, count, buffer);
1978 static int cortex_a8_handle_target_request(void *priv)
1980 struct target *target = priv;
1981 struct armv7a_common *armv7a = target_to_armv7a(target);
1982 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1983 int retval;
1985 if (!target_was_examined(target))
1986 return ERROR_OK;
1987 if (!target->dbg_msg_enabled)
1988 return ERROR_OK;
1990 if (target->state == TARGET_RUNNING)
1992 uint32_t request;
1993 uint32_t dscr;
1994 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1995 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1997 /* check if we have data */
1998 while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
2000 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2001 armv7a->debug_base+ CPUDBG_DTRTX, &request);
2002 if (retval == ERROR_OK)
2004 target_request(target, request);
2005 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2006 armv7a->debug_base+ CPUDBG_DSCR, &dscr);
2011 return ERROR_OK;
2015 * Cortex-A8 target information and configuration
2018 static int cortex_a8_examine_first(struct target *target)
2020 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2021 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2022 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2023 int i;
2024 int retval = ERROR_OK;
2025 uint32_t didr, ctypr, ttypr, cpuid;
2027 /* Instead of doing one extra read to ensure the DAP is configured,
2028 * we call ahbap_debugport_init(swjdp)
2030 retval = ahbap_debugport_init(swjdp);
2031 if (retval != ERROR_OK)
2032 return retval;
2034 if (!target->dbgbase_set)
2036 uint32_t dbgbase;
2037 /* Get ROM Table base */
2038 uint32_t apid;
2039 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2040 if (retval != ERROR_OK)
2041 return retval;
2042 /* Lookup 0x15 -- Processor DAP */
2043 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2044 &armv7a->debug_base);
2045 if (retval != ERROR_OK)
2046 return retval;
2048 else
2050 armv7a->debug_base = target->dbgbase;
2053 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2054 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2055 if (retval != ERROR_OK)
2056 return retval;
2058 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2059 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2061 LOG_DEBUG("Examine %s failed", "CPUID");
2062 return retval;
2065 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2066 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2068 LOG_DEBUG("Examine %s failed", "CTYPR");
2069 return retval;
2072 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2073 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2075 LOG_DEBUG("Examine %s failed", "TTYPR");
2076 return retval;
2079 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2080 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2082 LOG_DEBUG("Examine %s failed", "DIDR");
2083 return retval;
2086 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2087 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2088 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2089 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2091 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
2092 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2093 if (retval != ERROR_OK)
2094 return retval;
2096 /* Setup Breakpoint Register Pairs */
2097 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2098 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2099 cortex_a8->brp_num_available = cortex_a8->brp_num;
2100 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2101 // cortex_a8->brb_enabled = ????;
2102 for (i = 0; i < cortex_a8->brp_num; i++)
2104 cortex_a8->brp_list[i].used = 0;
2105 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2106 cortex_a8->brp_list[i].type = BRP_NORMAL;
2107 else
2108 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2109 cortex_a8->brp_list[i].value = 0;
2110 cortex_a8->brp_list[i].control = 0;
2111 cortex_a8->brp_list[i].BRPn = i;
2114 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2116 target_set_examined(target);
2117 return ERROR_OK;
2120 static int cortex_a8_examine(struct target *target)
2122 int retval = ERROR_OK;
2124 /* don't re-probe hardware after each reset */
2125 if (!target_was_examined(target))
2126 retval = cortex_a8_examine_first(target);
2128 /* Configure core debug access */
2129 if (retval == ERROR_OK)
2130 retval = cortex_a8_init_debug_access(target);
2132 return retval;
2136 * Cortex-A8 target creation and initialization
2139 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2140 struct target *target)
2142 /* examine_first() already performs most of this setup */
2143 return ERROR_OK;
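/*
 * Build the per-core state: attach the core's DAP to the JTAG tap
 * (creating one if the tap has none yet), install the ARMv7-A MMU and
 * cache callbacks, and register the periodic target-request handler.
 */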
2146 static int cortex_a8_init_arch_info(struct target *target,
2147 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2149 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2150 struct arm *armv4_5 = &armv7a->armv4_5_common;
2151 struct adiv5_dap *dap = &armv7a->dap;
2153 armv7a->armv4_5_common.dap = dap;
2155 /* Setup struct cortex_a8_common */
2156 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2157 /* tap has no dap initialized */
2158 if (!tap->dap)
2160 armv7a->armv4_5_common.dap = dap;
2161 /* point the generic ARM state at the ARMv7-A arch info */
2162 armv4_5->arch_info = armv7a;
2164 /* prepare JTAG information for the new target */
2165 cortex_a8->jtag_info.tap = tap;
2166 cortex_a8->jtag_info.scann_size = 4;
2168 /* Leave (only) generic DAP stuff for debugport_init() */
2169 dap->jtag_info = &cortex_a8->jtag_info;
2171 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2172 dap->tar_autoincr_block = (1 << 10);
2173 dap->memaccess_tck = 80;
2174 tap->dap = dap;
2176 else
2177 armv7a->armv4_5_common.dap = tap->dap;
2179 cortex_a8->fast_reg_read = 0;
2181 /* Set default value */
2182 cortex_a8->current_address_mode = ARM_MODE_ANY;
2184 /* register arch-specific functions */
2185 armv7a->examine_debug_reason = NULL;
2187 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2189 armv7a->pre_restore_context = NULL;
2190 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2191 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
2192 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
2193 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
2194 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
2195 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
2196 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2197 armv7a->armv4_5_mmu.mmu_enabled = 0;
2200 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2202 /* REVISIT v7a setup should be in a v7a-specific routine */
2203 arm_init_arch_info(target, armv4_5);
2204 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2206 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2208 return ERROR_OK;
2211 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2213 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2215 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
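/*
 * Return the active translation table base.  TTBR1 is read for kernel
 * addresses and TTBR0 for user addresses, based on the address mode set
 * by cortex_a8_virt2phys() or, failing that, on the current core mode;
 * the low-order attribute bits are masked off.
 */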
2218 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
2220 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2221 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2222 uint32_t ttb = 0; int retval = ERROR_OK;
2224 /* current_address_mode is set inside cortex_a8_virt2phys()
2225 where we can determine if address belongs to user or kernel */
2226 if (cortex_a8->current_address_mode == ARM_MODE_SVC)
2228 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 */
2229 retval = armv7a->armv4_5_common.mrc(target, 15,
2230 0, 1, /* op1, op2 */
2231 2, 0, /* CRn, CRm */
2232 &ttb);
2233 if (retval != ERROR_OK)
2234 return retval;
2236 else if (cortex_a8->current_address_mode == ARM_MODE_USR)
2238 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 */
2239 retval = armv7a->armv4_5_common.mrc(target, 15,
2240 0, 0, /* op1, op2 */
2241 2, 0, /* CRn, CRm */
2242 &ttb);
2243 if (retval != ERROR_OK)
2244 return retval;
2246 /* We don't know whether the address is a user or a kernel one;
2247 assume it belongs to the kernel if the core is in kernel mode,
2248 and to user space if the core is in user mode */
2250 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2252 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 */
2253 retval = armv7a->armv4_5_common.mrc(target, 15,
2254 0, 1, /* op1, op2 */
2255 2, 0, /* CRn, CRm */
2256 &ttb);
2257 if (retval != ERROR_OK)
2258 return retval;
2260 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2262 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 */
2263 retval = armv7a->armv4_5_common.mrc(target, 15,
2264 0, 0, /* op1, op2 */
2265 2, 0, /* CRn, CRm */
2266 &ttb);
2267 if (retval != ERROR_OK)
2268 return retval;
2270 /* otherwise we cannot tell which ttb (user or kernel) to use */
2271 else
2272 LOG_ERROR("Don't know how to get the ttb for the current mode");
2274 ttb &= 0xffffc000;
2276 *result = ttb;
2278 return ERROR_OK;
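/*
 * Disable the MMU and/or caches by clearing the corresponding SCTLR bits:
 * bit 0 (M) for the MMU, bit 2 (C) for the data/unified cache and
 * bit 12 (I) for the instruction cache.  cortex_a8_enable_mmu_caches()
 * below sets the same bits.
 */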
2281 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2282 int d_u_cache, int i_cache)
2284 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2285 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2286 uint32_t cp15_control;
2287 int retval;
2289 /* read cp15 control register */
2290 retval = armv7a->armv4_5_common.mrc(target, 15,
2291 0, 0, /* op1, op2 */
2292 1, 0, /* CRn, CRm */
2293 &cp15_control);
2294 if (retval != ERROR_OK)
2295 return retval;
2298 if (mmu)
2299 cp15_control &= ~0x1U;
2301 if (d_u_cache)
2302 cp15_control &= ~0x4U;
2304 if (i_cache)
2305 cp15_control &= ~0x1000U;
2307 retval = armv7a->armv4_5_common.mcr(target, 15,
2308 0, 0, /* op1, op2 */
2309 1, 0, /* CRn, CRm */
2310 cp15_control);
2311 return retval;
2314 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2315 int d_u_cache, int i_cache)
2317 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2318 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2319 uint32_t cp15_control;
2320 int retval;
2322 /* read cp15 control register */
2323 retval = armv7a->armv4_5_common.mrc(target, 15,
2324 0, 0, /* op1, op2 */
2325 1, 0, /* CRn, CRm */
2326 &cp15_control);
2327 if (retval != ERROR_OK)
2328 return retval;
2330 if (mmu)
2331 cp15_control |= 0x1U;
2333 if (d_u_cache)
2334 cp15_control |= 0x4U;
2336 if (i_cache)
2337 cp15_control |= 0x1000U;
2339 retval = armv7a->armv4_5_common.mcr(target, 15,
2340 0, 0, /* op1, op2 */
2341 1, 0, /* CRn, CRm */
2342 cp15_control);
2343 return retval;
2347 static int cortex_a8_mmu(struct target *target, int *enabled)
2349 if (target->state != TARGET_HALTED) {
2350 LOG_ERROR("%s: target not halted", __func__);
2351 return ERROR_TARGET_INVALID;
2354 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2355 return ERROR_OK;
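/*
 * Translate a virtual address to a physical one.  The address is first
 * classified as user (< 0xc0000000) or kernel space so that
 * cortex_a8_get_ttb() picks the matching translation table, then the
 * actual walk is done by armv4_5_mmu_translate_va().
 */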
2358 static int cortex_a8_virt2phys(struct target *target,
2359 uint32_t virt, uint32_t *phys)
2361 uint32_t cb;
2362 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2363 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2364 struct armv7a_common *armv7a = target_to_armv7a(target);
2366 /* We assume that virtual address is separated
2367 between user and kernel in Linux style:
2368 0x00000000-0xbfffffff - User space
2369 0xc0000000-0xffffffff - Kernel space */
2370 if (virt < 0xc0000000) /* Linux user space */
2371 cortex_a8->current_address_mode = ARM_MODE_USR;
2372 else /* Linux kernel */
2373 cortex_a8->current_address_mode = ARM_MODE_SVC;
2374 uint32_t ret;
2375 int retval = armv4_5_mmu_translate_va(target,
2376 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2377 if (retval != ERROR_OK)
2378 return retval;
2379 /* Reset the flag so a stale address mode is not used by mistake */
2380 cortex_a8->current_address_mode = ARM_MODE_ANY;
2382 *phys = ret;
2383 return ERROR_OK;
2386 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2388 struct target *target = get_current_target(CMD_CTX);
2389 struct armv7a_common *armv7a = target_to_armv7a(target);
2391 return armv4_5_handle_cache_info_command(CMD_CTX,
2392 &armv7a->armv4_5_mmu.armv4_5_cache);
2396 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2398 struct target *target = get_current_target(CMD_CTX);
2399 if (!target_was_examined(target))
2401 LOG_ERROR("target not examined yet");
2402 return ERROR_FAIL;
2405 return cortex_a8_init_debug_access(target);
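/* smp_off: clear the smp flag on this core and on every core in its
 * target list, and point the gdb service back at this core. */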
2407 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2409 struct target *target = get_current_target(CMD_CTX);
2410 /* check target is an smp target */
2411 struct target_list *head;
2412 struct target *curr;
2413 head = target->head;
2414 target->smp = 0;
2415 if (head != (struct target_list*)NULL)
2417 while (head != (struct target_list*)NULL)
2419 curr = head->target;
2420 curr->smp = 0;
2421 head = head->next;
2423 /* restore this target as the one presented to the debugger */
2424 target->gdb_service->target = target;
2426 return ERROR_OK;
2429 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2431 struct target *target = get_current_target(CMD_CTX);
2432 struct target_list *head;
2433 struct target *curr;
2434 head = target->head;
2435 if (head != (struct target_list*)NULL)
2436 { target->smp = 1;
2437 while (head != (struct target_list*)NULL)
2439 curr = head->target;
2440 curr->smp = 1;
2441 head = head->next;
2444 return ERROR_OK;
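/* smp_gdb: with no argument, report the current and requested gdb cores;
 * with a core id argument, record it in gdb_service->core[1] as the
 * requested core. */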
2447 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2449 struct target *target = get_current_target(CMD_CTX);
2450 int retval = ERROR_OK;
2451 struct target_list *head;
2452 head = target->head;
2453 if (head != (struct target_list*)NULL)
2455 if (CMD_ARGC == 1)
2457 int coreid = 0;
2458 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2459 if (ERROR_OK != retval)
2460 return retval;
2461 target->gdb_service->core[1] = coreid;
2464 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2465 , target->gdb_service->core[1]);
2467 return ERROR_OK;
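/*
 * Illustrative sketch of how the commands registered below are used from
 * the OpenOCD telnet/Tcl prompt (the core id value is only an example):
 *
 *   cortex_a8 dbginit       ;# re-initialize core debug access
 *   cortex_a8 cache_info    ;# show cache information
 *   cortex_a8 smp_off       ;# stop smp handling
 *   cortex_a8 smp_on        ;# restart smp handling
 *   cortex_a8 smp_gdb 1     ;# request core 1 for gdb
 */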
2470 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2472 .name = "cache_info",
2473 .handler = cortex_a8_handle_cache_info_command,
2474 .mode = COMMAND_EXEC,
2475 .help = "display information about target caches",
2478 .name = "dbginit",
2479 .handler = cortex_a8_handle_dbginit_command,
2480 .mode = COMMAND_EXEC,
2481 .help = "Initialize core debug",
2483 { .name = "smp_off",
2484 .handler = cortex_a8_handle_smp_off_command,
2485 .mode = COMMAND_EXEC,
2486 .help = "Stop smp handling",
2489 .name = "smp_on",
2490 .handler = cortex_a8_handle_smp_on_command,
2491 .mode = COMMAND_EXEC,
2492 .help = "Restart smp handling",
2495 .name = "smp_gdb",
2496 .handler = cortex_a8_handle_smp_gdb_command,
2497 .mode = COMMAND_EXEC,
2498 .help = "display or set the current core served to gdb",
2502 COMMAND_REGISTRATION_DONE
2504 static const struct command_registration cortex_a8_command_handlers[] = {
2506 .chain = arm_command_handlers,
2509 .chain = armv7a_command_handlers,
2512 .name = "cortex_a8",
2513 .mode = COMMAND_ANY,
2514 .help = "Cortex-A8 command group",
2515 .chain = cortex_a8_exec_command_handlers,
2517 COMMAND_REGISTRATION_DONE
2520 struct target_type cortexa8_target = {
2521 .name = "cortex_a8",
2523 .poll = cortex_a8_poll,
2524 .arch_state = armv7a_arch_state,
2526 .target_request_data = NULL,
2528 .halt = cortex_a8_halt,
2529 .resume = cortex_a8_resume,
2530 .step = cortex_a8_step,
2532 .assert_reset = cortex_a8_assert_reset,
2533 .deassert_reset = cortex_a8_deassert_reset,
2534 .soft_reset_halt = NULL,
2536 /* REVISIT allow exporting VFP3 registers ... */
2537 .get_gdb_reg_list = arm_get_gdb_reg_list,
2539 .read_memory = cortex_a8_read_memory,
2540 .write_memory = cortex_a8_write_memory,
2541 .bulk_write_memory = cortex_a8_bulk_write_memory,
2543 .checksum_memory = arm_checksum_memory,
2544 .blank_check_memory = arm_blank_check_memory,
2546 .run_algorithm = armv4_5_run_algorithm,
2548 .add_breakpoint = cortex_a8_add_breakpoint,
2549 .remove_breakpoint = cortex_a8_remove_breakpoint,
2550 .add_watchpoint = NULL,
2551 .remove_watchpoint = NULL,
2553 .commands = cortex_a8_command_handlers,
2554 .target_create = cortex_a8_target_create,
2555 .init_target = cortex_a8_init_target,
2556 .examine = cortex_a8_examine,
2558 .read_phys_memory = cortex_a8_read_phys_memory,
2559 .write_phys_memory = cortex_a8_write_phys_memory,
2560 .mmu = cortex_a8_mmu,
2561 .virt2phys = cortex_a8_virt2phys,