aarch64: fix stepping from address
src/target/aarch64.c
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.                                        *
 *                                                                         *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include <helper/time_support.h>
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
    struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
    struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
    struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
    struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
    target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
    uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
    uint32_t opcode, uint32_t data);
static int aarch64_restore_system_control_reg(struct target *target)
{
    int retval = ERROR_OK;

    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = target_to_armv8(target);

    if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
        aarch64->system_control_reg_curr = aarch64->system_control_reg;
        /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

        switch (armv8->arm.core_mode) {
        case ARMV8_64_EL0T:
        case ARMV8_64_EL1T:
        case ARMV8_64_EL1H:
            retval = armv8->arm.msr(target, 3, /*op 0*/
                    0, 0,   /* op1, op2 */
                    1, 0,   /* CRn, CRm */
                    aarch64->system_control_reg);
            if (retval != ERROR_OK)
                return retval;
            break;
        case ARMV8_64_EL2T:
        case ARMV8_64_EL2H:
            retval = armv8->arm.msr(target, 3, /*op 0*/
                    4, 0,   /* op1, op2 */
                    1, 0,   /* CRn, CRm */
                    aarch64->system_control_reg);
            if (retval != ERROR_OK)
                return retval;
            break;
        case ARMV8_64_EL3H:
        case ARMV8_64_EL3T:
            retval = armv8->arm.msr(target, 3, /*op 0*/
                    6, 0,   /* op1, op2 */
                    1, 0,   /* CRn, CRm */
                    aarch64->system_control_reg);
            if (retval != ERROR_OK)
                return retval;
            break;
        default:
            LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
        }
    }
    return retval;
}
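
/*
 * Reference for the MSR accesses above (standard ARMv8-A system register
 * encodings, summarized here rather than spelled out in the original
 * comments): SCTLR_ELx is op0=3, CRn=1, CRm=0, op2=0, with op1 selecting
 * the exception level -- 0 for SCTLR_EL1, 4 for SCTLR_EL2, 6 for
 * SCTLR_EL3. In assembly the three switch arms correspond to:
 *
 *   msr sctlr_el1, x0
 *   msr sctlr_el2, x0
 *   msr sctlr_el3, x0
 *
 * They match the MRS encodings used in aarch64_post_debug_entry() below,
 * so the value read at debug entry round-trips to the same register.
 */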
/* Check the address before an APB read/write access with the MMU on;
 * avoids a predictable APB data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
    /* TODO */
    return ERROR_OK;
}
/* modify system_control_reg in order to enable or disable mmu for:
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    int retval = ERROR_OK;

    if (enable) {
        /* if the MMU was disabled when the target stopped, we cannot enable it now */
        if (!(aarch64->system_control_reg & 0x1U)) {
            LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
            return ERROR_FAIL;
        }
        if (!(aarch64->system_control_reg_curr & 0x1U)) {
            aarch64->system_control_reg_curr |= 0x1U;
            switch (armv8->arm.core_mode) {
            case ARMV8_64_EL0T:
            case ARMV8_64_EL1T:
            case ARMV8_64_EL1H:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        0, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            case ARMV8_64_EL2T:
            case ARMV8_64_EL2H:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        4, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            case ARMV8_64_EL3H:
            case ARMV8_64_EL3T:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        6, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            default:
                LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
            }
        }
    } else {
        if (aarch64->system_control_reg_curr & 0x4U) {
            /* data cache is active */
            aarch64->system_control_reg_curr &= ~0x4U;
            /* flush the data cache: the armv7-style flush-all function is called */
            if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
                armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
        }
        if ((aarch64->system_control_reg_curr & 0x1U)) {
            aarch64->system_control_reg_curr &= ~0x1U;
            switch (armv8->arm.core_mode) {
            case ARMV8_64_EL0T:
            case ARMV8_64_EL1T:
            case ARMV8_64_EL1H:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        0, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            case ARMV8_64_EL2T:
            case ARMV8_64_EL2H:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        4, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            case ARMV8_64_EL3H:
            case ARMV8_64_EL3T:
                retval = armv8->arm.msr(target, 3, /*op 0*/
                        6, 0,   /* op1, op2 */
                        1, 0,   /* CRn, CRm */
                        aarch64->system_control_reg_curr);
                if (retval != ERROR_OK)
                    return retval;
                break;
            default:
                LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
                break;
            }
        }
    }
    return retval;
}
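
/*
 * Note on the disable path above (rationale as commonly documented for
 * ARMv8-A; the original comments are terse here): the data cache is
 * turned off and flushed before the MMU bit is cleared because, with
 * the MMU off, data accesses bypass the cache, so any dirty lines left
 * behind would become invisible to subsequent reads and writes.
 */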
/*
 * Basic debug access, very low level, assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
    struct armv8_common *armv8 = target_to_armv8(target);
    int retval;
    uint32_t dummy;

    LOG_DEBUG(" ");

    /* Unlocking the debug registers for modification
     * The debugport might be uninitialised so try twice */
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
    if (retval != ERROR_OK) {
        /* try again */
        retval = mem_ap_write_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
        if (retval == ERROR_OK)
            LOG_USER("Unlocking debug access failed on first, but succeeded on second try.");
    }
    if (retval != ERROR_OK)
        return retval;
    /* Clear Sticky Power Down status Bit in PRSR to enable access to
       the registers in the Core Power Domain */
    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
    if (retval != ERROR_OK)
        return retval;

    /* Enabling of instruction execution in debug mode is done in debug_entry code */

    /* Resync breakpoint registers */

    /* Since this is likely called from init or reset, update target state information */
    return aarch64_poll(target);
}
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_ITE if you
 * happen to know that no instruction is pending.
 */
static int aarch64_exec_opcode(struct target *target,
    uint32_t opcode, uint32_t *dscr_p)
{
    uint32_t dscr;
    int retval;
    struct armv8_common *armv8 = target_to_armv8(target);
    dscr = dscr_p ? *dscr_p : 0;

    LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

    /* Wait for InstrCompl bit to be set */
    long long then = timeval_ms();
    while ((dscr & DSCR_ITE) == 0) {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
        if (retval != ERROR_OK) {
            LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
            return retval;
        }
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
            return ERROR_FAIL;
        }
    }

    retval = mem_ap_write_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_ITR, opcode);
    if (retval != ERROR_OK)
        return retval;

    then = timeval_ms();
    do {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
        if (retval != ERROR_OK) {
            LOG_ERROR("Could not read DSCR register");
            return retval;
        }
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
            return ERROR_FAIL;
        }
    } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */

    if (dscr_p)
        *dscr_p = dscr;

    return retval;
}
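
/*
 * Usage sketch (illustrative; DSB_SY is used exactly this way later in
 * this file by aarch64_instr_cpsr_sync):
 *
 *   uint32_t dscr = DSCR_ITE;   // no instruction pending
 *   retval = aarch64_exec_opcode(target, DSB_SY, &dscr);
 *
 * The handshake above is the standard ARMv8 external-debug instruction
 * interface: wait for EDSCR.ITE, write the opcode to the ITR, then wait
 * for ITE again so the "ITE set on return" invariant holds. Threading
 * the DSCR pointer across consecutive calls avoids re-reading DSCR just
 * to confirm that ITE is already set.
 */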
/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
    uint32_t address,
    uint32_t value)
{
    int retval;
    struct armv8_common *armv8 = target_to_armv8(target);

    retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

    return retval;
}
/*
 * AARCH64 implementation of Debug Programmer's Model
 *
 * NOTE the invariant:  these routines return with DSCR_ITE set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
    return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
    LOG_DEBUG("write DCC 0x%08" PRIx32, data);
    return mem_ap_write_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRRX, data);
}

static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
{
    int ret;
    LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
    LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
    ret = mem_ap_write_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRRX, data);
    ret += mem_ap_write_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
    return ret;
}
static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
    uint32_t *dscr_p)
{
    uint32_t dscr = DSCR_ITE;
    int retval;

    if (dscr_p)
        dscr = *dscr_p;

    /* Wait for DTRTXfull: the core has placed data in the DCC */
    long long then = timeval_ms();
    while ((dscr & DSCR_DTR_TX_FULL) == 0) {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR,
                &dscr);
        if (retval != ERROR_OK)
            return retval;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for read dcc");
            return ERROR_FAIL;
        }
    }

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRTX,
            data);
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

    if (dscr_p)
        *dscr_p = dscr;

    return retval;
}

static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
    uint32_t *dscr_p)
{
    uint32_t dscr = DSCR_ITE;
    uint32_t higher;
    int retval;

    if (dscr_p)
        dscr = *dscr_p;

    /* Wait for DTRTXfull: the core has placed data in the DCC */
    long long then = timeval_ms();
    while ((dscr & DSCR_DTR_TX_FULL) == 0) {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR,
                &dscr);
        if (retval != ERROR_OK)
            return retval;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for read dcc");
            return ERROR_FAIL;
        }
    }

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRTX,
            (uint32_t *)data);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DTRRX,
            &higher);
    if (retval != ERROR_OK)
        return retval;

    *data = *(uint32_t *)data | (uint64_t)higher << 32;
    LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);

    if (dscr_p)
        *dscr_p = dscr;

    return retval;
}
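
/*
 * On a 64-bit DCC transfer, the core's "msr dbgdtr_el0, xN" splits the
 * value across both halves of the channel (layout per the ARMv8-A debug
 * architecture): the low word arrives in DBGDTRTX and the high word in
 * DBGDTRRX. That is why aarch64_read_dcc_64() reads CPUV8_DBG_DTRTX
 * first and then fetches the upper 32 bits from CPUV8_DBG_DTRRX, and
 * why aarch64_write_dcc_64() writes the two halves in the opposite
 * pairing.
 */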
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr;
    int retval;

    /* set up invariant: INSTR_COMP is set after every DPM operation */
    long long then = timeval_ms();
    for (;; ) {
        retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
                a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
                &dscr);
        if (retval != ERROR_OK)
            return retval;
        if ((dscr & DSCR_ITE) != 0)
            break;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for dpm prepare");
            return ERROR_FAIL;
        }
    }

    /* this "should never happen" ... */
    if (dscr & DSCR_DTR_RX_FULL) {
        LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
        /* Clear DCCRX */
        retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
                a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
        if (retval != ERROR_OK)
            return retval;

        /* Clear sticky error */
        retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
                a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
        if (retval != ERROR_OK)
            return retval;
    }

    return retval;
}
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
    /* REVISIT what could be done here? */
    return ERROR_OK;
}

static int aarch64_instr_execute(struct arm_dpm *dpm,
    uint32_t opcode)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_ITE;

    return aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
}
static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
    uint32_t opcode, uint32_t data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    int retval;
    uint32_t dscr = DSCR_ITE;

    retval = aarch64_write_dcc(&a8->armv8_common, data);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
}

static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
    uint32_t opcode, uint64_t data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    int retval;
    uint32_t dscr = DSCR_ITE;

    retval = aarch64_write_dcc_64(&a8->armv8_common, data);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
}
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
    uint32_t opcode, uint32_t data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_ITE;
    int retval;

    retval = aarch64_write_dcc(&a8->armv8_common, data);
    if (retval != ERROR_OK)
        return retval;

    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    /* then the opcode, taking data from R0 */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);

    return retval;
}

static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
    uint32_t opcode, uint64_t data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_ITE;
    int retval;

    retval = aarch64_write_dcc_64(&a8->armv8_common, data);
    if (retval != ERROR_OK)
        return retval;

    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    /* then the opcode, taking data from R0 */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);

    return retval;
}
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
    struct target *target = dpm->arm->target;
    uint32_t dscr = DSCR_ITE;

    /* "Prefetch flush" after modifying execution status in CPSR */
    return aarch64_exec_opcode(target,
            DSB_SY,
            &dscr);
}
static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
    uint32_t opcode, uint32_t *data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    int retval;
    uint32_t dscr = DSCR_ITE;

    /* the opcode, writing data to DCC */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
    uint32_t opcode, uint64_t *data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    int retval;
    uint32_t dscr = DSCR_ITE;

    /* the opcode, writing data to DCC */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
    uint32_t opcode, uint32_t *data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_ITE;
    int retval;

    /* the opcode, writing data to R0 */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    /* write R0 to DCC */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0),  /* msr dbgdtrtx_el0, x0 */
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
    uint32_t opcode, uint64_t *data)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t dscr = DSCR_ITE;
    int retval;

    /* the opcode, writing data to R0 */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            opcode,
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    /* write R0 to DCC */
    retval = aarch64_exec_opcode(
            a8->armv8_common.arm.target,
            ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0),  /* msr dbgdtr_el0, x0 */
            &dscr);
    if (retval != ERROR_OK)
        return retval;

    return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}
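
/*
 * These callbacks implement the generic DPM register-access pattern:
 * run an opcode that leaves the value in x0, then drain x0 through the
 * DCC (or the reverse for writes). A caller sketch, assuming some MRS
 * opcode that targets x0:
 *
 *   uint32_t value;
 *   retval = dpm->instr_read_data_r0(dpm, opcode, &value);
 *
 * The hooks are wired up in aarch64_dpm_setup() below, which is what
 * lets the generic armv8_dpm code stay architecture-neutral.
 */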
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
    uint32_t addr, uint32_t control)
{
    struct aarch64_common *a8 = dpm_to_a8(dpm);
    uint32_t vr = a8->armv8_common.debug_base;
    uint32_t cr = a8->armv8_common.debug_base;
    int retval;

    switch (index_t) {
    case 0 ... 15:  /* breakpoints */
        vr += CPUV8_DBG_BVR_BASE;
        cr += CPUV8_DBG_BCR_BASE;
        break;
    case 16 ... 31: /* watchpoints */
        vr += CPUV8_DBG_WVR_BASE;
        cr += CPUV8_DBG_WCR_BASE;
        index_t -= 16;
        break;
    default:
        return ERROR_FAIL;
    }
    vr += 16 * index_t;
    cr += 16 * index_t;

    LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
        (unsigned) vr, (unsigned) cr);

    retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
            vr, addr);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
            cr, control);
    return retval;
}

static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
    struct aarch64_common *a = dpm_to_a8(dpm);
    uint32_t cr;

    switch (index_t) {
    case 0 ... 15:
        cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
        break;
    case 16 ... 31:
        cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
        index_t -= 16;
        break;
    default:
        return ERROR_FAIL;
    }
    cr += 16 * index_t;

    LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

    /* clear control register */
    return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
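
/*
 * Register layout assumed above (matching the CPUV8_DBG_* offsets):
 * each breakpoint or watchpoint owns a 16-byte slot, so a value/control
 * pair sits at BASE + 16 * n. DPM indices 0..15 map to breakpoint pairs
 * (BVR/BCR) and 16..31 to watchpoint pairs (WVR/WCR), which is why the
 * watchpoint arm rebases index_t by 16 before computing the offset.
 */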
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
    struct arm_dpm *dpm = &a8->armv8_common.dpm;
    int retval;

    dpm->arm = &a8->armv8_common.arm;
    dpm->didr = debug;

    dpm->prepare = aarch64_dpm_prepare;
    dpm->finish = aarch64_dpm_finish;

    dpm->instr_execute = aarch64_instr_execute;
    dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
    dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
    dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
    dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
    dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

    dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
    dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
    dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
    dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

    dpm->arm_reg_current = armv8_reg_current;

    dpm->bpwp_enable = aarch64_bpwp_enable;
    dpm->bpwp_disable = aarch64_bpwp_disable;

    retval = armv8_dpm_setup(dpm);
    if (retval == ERROR_OK)
        retval = armv8_dpm_initialize(dpm);

    return retval;
}
static struct target *get_aarch64(struct target *target, int32_t coreid)
{
    struct target_list *head;
    struct target *curr;

    head = target->head;
    while (head != (struct target_list *)NULL) {
        curr = head->target;
        if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
            return curr;
        head = head->next;
    }
    return target;
}
static int aarch64_halt(struct target *target);

static int aarch64_halt_smp(struct target *target)
{
    int retval = 0;
    struct target_list *head;
    struct target *curr;
    head = target->head;
    while (head != (struct target_list *)NULL) {
        curr = head->target;
        if ((curr != target) && (curr->state != TARGET_HALTED))
            retval += aarch64_halt(curr);
        head = head->next;
    }
    return retval;
}

static int update_halt_gdb(struct target *target)
{
    int retval = 0;
    if (target->gdb_service && target->gdb_service->core[0] == -1) {
        target->gdb_service->target = target;
        target->gdb_service->core[0] = target->coreid;
        retval += aarch64_halt_smp(target);
    }
    return retval;
}
/*
 * AArch64 Run control
 */

static int aarch64_poll(struct target *target)
{
    int retval = ERROR_OK;
    uint32_t dscr;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    enum target_state prev_target_state = target->state;
    /* toggling to another core is done by gdb as follows: */
    /*   maint packet J core_id */
    /*   continue */
    /* the next poll then triggers a halt event sent to gdb */
    if ((target->state == TARGET_HALTED) && (target->smp) &&
        (target->gdb_service) &&
        (target->gdb_service->target == NULL)) {
        target->gdb_service->target =
            get_aarch64(target, target->gdb_service->core[1]);
        target_call_event_callbacks(target, TARGET_EVENT_HALTED);
        return retval;
    }
    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        return retval;
    aarch64->cpudbg_dscr = dscr;

    if (DSCR_RUN_MODE(dscr) == 0x3) {
        if (prev_target_state != TARGET_HALTED) {
            /* We have a halting debug event */
            LOG_DEBUG("Target halted");
            target->state = TARGET_HALTED;
            if ((prev_target_state == TARGET_RUNNING)
                || (prev_target_state == TARGET_UNKNOWN)
                || (prev_target_state == TARGET_RESET)) {
                retval = aarch64_debug_entry(target);
                if (retval != ERROR_OK)
                    return retval;
                if (target->smp) {
                    retval = update_halt_gdb(target);
                    if (retval != ERROR_OK)
                        return retval;
                }
                target_call_event_callbacks(target,
                    TARGET_EVENT_HALTED);
            }
            if (prev_target_state == TARGET_DEBUG_RUNNING) {
                LOG_DEBUG(" ");

                retval = aarch64_debug_entry(target);
                if (retval != ERROR_OK)
                    return retval;
                if (target->smp) {
                    retval = update_halt_gdb(target);
                    if (retval != ERROR_OK)
                        return retval;
                }

                target_call_event_callbacks(target,
                    TARGET_EVENT_DEBUG_HALTED);
            }
        }
    } else
        target->state = TARGET_RUNNING;

    return retval;
}
static int aarch64_halt(struct target *target)
{
    int retval = ERROR_OK;
    uint32_t dscr;
    struct armv8_common *armv8 = target_to_armv8(target);

    /* enable CTI */
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_CTR, 1);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_GATE, 3);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_OUTEN0, 1);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_OUTEN1, 2);
    if (retval != ERROR_OK)
        return retval;

    /*
     * add HDE in halting debug mode
     */
    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_APPPULSE, 1);
    if (retval != ERROR_OK)
        return retval;

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_INACK, 1);
    if (retval != ERROR_OK)
        return retval;

    long long then = timeval_ms();
    for (;; ) {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
        if (retval != ERROR_OK)
            return retval;
        if ((dscr & DSCRV8_HALT_MASK) != 0)
            break;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for halt");
            return ERROR_FAIL;
        }
    }

    target->debug_reason = DBG_REASON_DBGRQ;

    return ERROR_OK;
}
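
/*
 * Halt sequencing above, in CTI terms (standard ARMv8 cross-trigger
 * wiring; the exact channel assignment is integration specific, and
 * this driver assumes the common one): CTI_CTR enables the CTI,
 * CTI_GATE opens channels 0 and 1, OUTEN0 routes channel 0 to trigger
 * output 0 (debug request) and OUTEN1 routes channel 1 to trigger
 * output 1 (restart). CTI_APPPULSE with bit 0 set pulses channel 0 to
 * request the halt; aarch64_internal_restart() below pulses channel 1
 * (value 2) to leave debug state again.
 */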
static int aarch64_internal_restore(struct target *target, int current,
    uint64_t *address, int handle_breakpoints, int debug_execution)
{
    struct armv8_common *armv8 = target_to_armv8(target);
    struct arm *arm = &armv8->arm;
    int retval;
    uint64_t resume_pc;

    if (!debug_execution)
        target_free_all_working_areas(target);

    /* current = 1: continue on current pc, otherwise continue at <address> */
    resume_pc = buf_get_u64(arm->pc->value, 0, 64);
    if (!current)
        resume_pc = *address;
    else
        *address = resume_pc;

    /* Make sure that the Armv7 gdb thumb fixups do not
     * kill the return address
     */
    switch (arm->core_state) {
    case ARM_STATE_ARM:
        resume_pc &= 0xFFFFFFFC;
        break;
    case ARM_STATE_AARCH64:
        resume_pc &= 0xFFFFFFFFFFFFFFFC;
        break;
    case ARM_STATE_THUMB:
    case ARM_STATE_THUMB_EE:
        /* When the return address is loaded into PC
         * bit 0 must be 1 to stay in Thumb state
         */
        resume_pc |= 0x1;
        break;
    case ARM_STATE_JAZELLE:
        LOG_ERROR("How do I resume into Jazelle state??");
        return ERROR_FAIL;
    }
    LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
    buf_set_u64(arm->pc->value, 0, 64, resume_pc);
    arm->pc->dirty = 1;
    arm->pc->valid = 1;
    dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

    /* call it now, before restoring the context, because it uses cpu
     * register r0 for restoring the system control register */
    retval = aarch64_restore_system_control_reg(target);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_restore_context(target, handle_breakpoints);
    if (retval != ERROR_OK)
        return retval;
    target->debug_reason = DBG_REASON_NOTHALTED;
    target->state = TARGET_RUNNING;

    /* registers are now invalid */
    register_cache_invalidate(arm->core_cache);

#if 0
    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints) {
        /* Single step past breakpoint at current address */
        breakpoint = breakpoint_find(target, resume_pc);
        if (breakpoint) {
            LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
            cortex_m3_unset_breakpoint(target, breakpoint);
            cortex_m3_single_step_core(target);
            cortex_m3_set_breakpoint(target, breakpoint);
        }
    }
#endif

    return retval;
}
static int aarch64_internal_restart(struct target *target)
{
    struct armv8_common *armv8 = target_to_armv8(target);
    struct arm *arm = &armv8->arm;
    int retval;
    uint32_t dscr;
    /*
     * Restart core and wait for it to be started.  Clear ITRen and sticky
     * exception flags: see ARMv7 ARM, C5.9.
     *
     * REVISIT: for single stepping, we probably want to
     * disable IRQs by default, with optional override...
     */

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        return retval;

    if ((dscr & DSCR_ITE) == 0)
        LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->cti_base + CTI_APPPULSE, 2);
    if (retval != ERROR_OK)
        return retval;

    long long then = timeval_ms();
    for (;; ) {
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
        if (retval != ERROR_OK)
            return retval;
        if ((dscr & DSCR_HDE) != 0)
            break;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("Timeout waiting for resume");
            return ERROR_FAIL;
        }
    }

    target->debug_reason = DBG_REASON_NOTHALTED;
    target->state = TARGET_RUNNING;

    /* registers are now invalid */
    register_cache_invalidate(arm->core_cache);

    return ERROR_OK;
}
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
    int retval = 0;
    struct target_list *head;
    struct target *curr;
    uint64_t address;
    head = target->head;
    while (head != (struct target_list *)NULL) {
        curr = head->target;
        if ((curr != target) && (curr->state != TARGET_RUNNING)) {
            /* resume at current address, not in step mode */
            retval += aarch64_internal_restore(curr, 1, &address,
                    handle_breakpoints, 0);
            retval += aarch64_internal_restart(curr);
        }
        head = head->next;
    }
    return retval;
}
static int aarch64_resume(struct target *target, int current,
    target_addr_t address, int handle_breakpoints, int debug_execution)
{
    int retval = 0;
    uint64_t addr = address;

    /* dummy resume for smp toggle in order to reduce gdb impact */
    if ((target->smp) && (target->gdb_service->core[1] != -1)) {
        /* simulate a start and halt of target */
        target->gdb_service->target = NULL;
        target->gdb_service->core[0] = target->gdb_service->core[1];
        /* fake resume: at the next poll we play target core[1], see poll */
        target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
        return 0;
    }
    aarch64_internal_restore(target, current, &addr, handle_breakpoints,
        debug_execution);
    if (target->smp) {
        target->gdb_service->core[0] = -1;
        retval = aarch64_restore_smp(target, handle_breakpoints);
        if (retval != ERROR_OK)
            return retval;
    }
    aarch64_internal_restart(target);

    if (!debug_execution) {
        target->state = TARGET_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
        LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
    } else {
        target->state = TARGET_DEBUG_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
        LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
    }

    return ERROR_OK;
}
static int aarch64_debug_entry(struct target *target)
{
    int retval = ERROR_OK;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = target_to_armv8(target);

    LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);

    /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
     * imprecise data aborts get discarded by issuing a Data
     * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
     */

    /* make sure to clear all sticky errors */
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
    if (retval != ERROR_OK)
        return retval;

    /* Examine debug reason */
    armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);

    /* save address of instruction that triggered the watchpoint? */
    if (target->debug_reason == DBG_REASON_WATCHPOINT) {
        uint32_t tmp;
        uint64_t wfar = 0;

        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_WFAR1,
                &tmp);
        if (retval != ERROR_OK)
            return retval;
        wfar = tmp;
        wfar = (wfar << 32);
        retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_WFAR0,
                &tmp);
        if (retval != ERROR_OK)
            return retval;
        wfar |= tmp;
        armv8_dpm_report_wfar(&armv8->dpm, wfar);
    }

    retval = armv8_dpm_read_current_registers(&armv8->dpm);

    if (armv8->post_debug_entry) {
        retval = armv8->post_debug_entry(target);
        if (retval != ERROR_OK)
            return retval;
    }

    return retval;
}
static int aarch64_post_debug_entry(struct target *target)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    int retval;

    mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
    switch (armv8->arm.core_mode) {
    case ARMV8_64_EL0T:
    case ARMV8_64_EL1T:
    case ARMV8_64_EL1H:
        retval = armv8->arm.mrs(target, 3, /*op 0*/
                0, 0,   /* op1, op2 */
                1, 0,   /* CRn, CRm */
                &aarch64->system_control_reg);
        if (retval != ERROR_OK)
            return retval;
        break;
    case ARMV8_64_EL2T:
    case ARMV8_64_EL2H:
        retval = armv8->arm.mrs(target, 3, /*op 0*/
                4, 0,   /* op1, op2 */
                1, 0,   /* CRn, CRm */
                &aarch64->system_control_reg);
        if (retval != ERROR_OK)
            return retval;
        break;
    case ARMV8_64_EL3H:
    case ARMV8_64_EL3T:
        retval = armv8->arm.mrs(target, 3, /*op 0*/
                6, 0,   /* op1, op2 */
                1, 0,   /* CRn, CRm */
                &aarch64->system_control_reg);
        if (retval != ERROR_OK)
            return retval;
        break;
    default:
        LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
    }
    LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
    aarch64->system_control_reg_curr = aarch64->system_control_reg;

    if (armv8->armv8_mmu.armv8_cache.ctype == -1)
        armv8_identify_cache(target);

    armv8->armv8_mmu.mmu_enabled =
            (aarch64->system_control_reg & 0x1U) ? 1 : 0;
    armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
            (aarch64->system_control_reg & 0x4U) ? 1 : 0;
    armv8->armv8_mmu.armv8_cache.i_cache_enabled =
            (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
    aarch64->curr_mode = armv8->arm.core_mode;
    return ERROR_OK;
}
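
/*
 * SCTLR bits decoded above (ARMv8-A SCTLR_ELx layout): bit 0 is M
 * (MMU enable), bit 2 is C (data/unified cache enable) and bit 12 is I
 * (instruction cache enable) -- hence the 0x1, 0x4 and 0x1000 masks,
 * the same ones aarch64_mmu_modify() manipulates.
 */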
static int aarch64_step(struct target *target, int current, target_addr_t address,
    int handle_breakpoints)
{
    struct armv8_common *armv8 = target_to_armv8(target);
    int retval;
    uint32_t edecr;
    uint32_t tmp;

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
    if (retval != ERROR_OK)
        return retval;

    /* set the halting-step bit */
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
    if (retval != ERROR_OK)
        return retval;

    target->debug_reason = DBG_REASON_SINGLESTEP;
    retval = aarch64_resume(target, current, address, 0, 0);
    if (retval != ERROR_OK)
        return retval;

    long long then = timeval_ms();
    while (target->state != TARGET_HALTED) {
        mem_ap_read_atomic_u32(armv8->debug_ap,
                armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
        LOG_DEBUG("EDESR = %#x", tmp);
        retval = aarch64_poll(target);
        if (retval != ERROR_OK)
            return retval;
        if (timeval_ms() > then + 1000) {
            LOG_ERROR("timeout waiting for target halt");
            return ERROR_FAIL;
        }
    }

    /* clear the halting-step bit again, using the EDECR value saved
     * before stepping (tmp holds EDESR contents by now) */
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_EDECR, (edecr&(~0x4)));
    if (retval != ERROR_OK)
        return retval;

    target_call_event_callbacks(target, TARGET_EVENT_HALTED);
    if (target->state == TARGET_HALTED)
        LOG_DEBUG("target stepped");

    return ERROR_OK;
}
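
/*
 * Stepping here relies on the ARMv8 halting-step mechanism: bit 2 of
 * EDECR (EDECR.SS) arms a one-instruction step, the resume lets the
 * core execute exactly one instruction and re-enter debug state, and
 * EDESR is polled only for diagnostics while waiting for the halt.
 * The bit must be cleared again afterwards, otherwise every subsequent
 * resume would halt after a single instruction.
 */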
static int aarch64_restore_context(struct target *target, bool bpwp)
{
    struct armv8_common *armv8 = target_to_armv8(target);

    LOG_DEBUG(" ");

    if (armv8->pre_restore_context)
        armv8->pre_restore_context(target);

    return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}
/*
 * AArch64 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
    struct breakpoint *breakpoint, uint8_t matchmode)
{
    int retval;
    int brp_i = 0;
    uint32_t control;
    uint8_t byte_addr_select = 0x0F;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    struct aarch64_brp *brp_list = aarch64->brp_list;
    uint32_t dscr;

    if (breakpoint->set) {
        LOG_WARNING("breakpoint already set");
        return ERROR_OK;
    }

    if (breakpoint->type == BKPT_HARD) {
        int64_t bpt_value;
        while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
            brp_i++;
        if (brp_i >= aarch64->brp_num) {
            LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
            return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
        }
        breakpoint->set = brp_i + 1;
        if (breakpoint->length == 2)
            byte_addr_select = (3 << (breakpoint->address & 0x02));
        control = ((matchmode & 0x7) << 20)
            | (1 << 13)
            | (byte_addr_select << 5)
            | (3 << 1) | 1;
        brp_list[brp_i].used = 1;
        brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
        brp_list[brp_i].control = control;
        bpt_value = brp_list[brp_i].value;

        retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
                (uint32_t)(bpt_value & 0xFFFFFFFF));
        if (retval != ERROR_OK)
            return retval;
        retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
                (uint32_t)(bpt_value >> 32));
        if (retval != ERROR_OK)
            return retval;

        retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
                brp_list[brp_i].control);
        if (retval != ERROR_OK)
            return retval;
        LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
            brp_list[brp_i].control,
            brp_list[brp_i].value);

    } else if (breakpoint->type == BKPT_SOFT) {
        uint8_t code[4];
        buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
        retval = target_read_memory(target,
                breakpoint->address & 0xFFFFFFFFFFFFFFFE,
                breakpoint->length, 1,
                breakpoint->orig_instr);
        if (retval != ERROR_OK)
            return retval;
        retval = target_write_memory(target,
                breakpoint->address & 0xFFFFFFFFFFFFFFFE,
                breakpoint->length, 1, code);
        if (retval != ERROR_OK)
            return retval;
        breakpoint->set = 0x11; /* Any nice value but 0 */
    }

    retval = mem_ap_read_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        return retval;
    /* Ensure that halting debug mode is enabled */
    dscr = dscr | DSCR_HDE;
    retval = mem_ap_write_atomic_u32(armv8->debug_ap,
            armv8->debug_base + CPUV8_DBG_DSCR, dscr);
    if (retval != ERROR_OK) {
        LOG_DEBUG("Failed to set DSCR.HDE");
        return retval;
    }

    return ERROR_OK;
}
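
/*
 * DBGBCR_EL1 fields assembled in 'control' above (field names per the
 * ARMv8-A debug architecture): bits[23:20] BT (breakpoint type, the
 * matchmode argument), bit 13 HMC, bits[12:5] BAS (byte address
 * select) and bits[2:1] PMC plus bit 0 E (enable). A plain 4-byte
 * address match therefore uses BAS = 0x0F, PMC = 0x3, E = 1, which is
 * exactly the (3 << 1) | 1 tail of the expression.
 */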
static int aarch64_set_context_breakpoint(struct target *target,
    struct breakpoint *breakpoint, uint8_t matchmode)
{
    int retval = ERROR_FAIL;
    int brp_i = 0;
    uint32_t control;
    uint8_t byte_addr_select = 0x0F;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    struct aarch64_brp *brp_list = aarch64->brp_list;

    if (breakpoint->set) {
        LOG_WARNING("breakpoint already set");
        return retval;
    }
    /* check available context BRPs */
    while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
        (brp_list[brp_i].type != BRP_CONTEXT)))
        brp_i++;

    if (brp_i >= aarch64->brp_num) {
        LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
        return ERROR_FAIL;
    }

    breakpoint->set = brp_i + 1;
    control = ((matchmode & 0x7) << 20)
        | (1 << 13)
        | (byte_addr_select << 5)
        | (3 << 1) | 1;
    brp_list[brp_i].used = 1;
    brp_list[brp_i].value = (breakpoint->asid);
    brp_list[brp_i].control = control;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
            brp_list[brp_i].value);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
            brp_list[brp_i].control);
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
        brp_list[brp_i].control,
        brp_list[brp_i].value);
    return ERROR_OK;
}
static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
    int retval = ERROR_FAIL;
    int brp_1 = 0;  /* holds the contextID pair */
    int brp_2 = 0;  /* holds the IVA pair */
    uint32_t control_CTX, control_IVA;
    uint8_t CTX_byte_addr_select = 0x0F;
    uint8_t IVA_byte_addr_select = 0x0F;
    uint8_t CTX_machmode = 0x03;
    uint8_t IVA_machmode = 0x01;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    struct aarch64_brp *brp_list = aarch64->brp_list;

    if (breakpoint->set) {
        LOG_WARNING("breakpoint already set");
        return retval;
    }
    /* check available context BRPs */
    while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
        (brp_list[brp_1].type != BRP_CONTEXT)))
        brp_1++;

    LOG_DEBUG("brp(CTX) found num: %d", brp_1);
    if (brp_1 >= aarch64->brp_num) {
        LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
        return ERROR_FAIL;
    }

    while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
        (brp_list[brp_2].type != BRP_NORMAL)))
        brp_2++;

    LOG_DEBUG("brp(IVA) found num: %d", brp_2);
    if (brp_2 >= aarch64->brp_num) {
        LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
        return ERROR_FAIL;
    }

    breakpoint->set = brp_1 + 1;
    breakpoint->linked_BRP = brp_2;
    control_CTX = ((CTX_machmode & 0x7) << 20)
        | (brp_2 << 16)
        | (0 << 14)
        | (CTX_byte_addr_select << 5)
        | (3 << 1) | 1;
    brp_list[brp_1].used = 1;
    brp_list[brp_1].value = (breakpoint->asid);
    brp_list[brp_1].control = control_CTX;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
            brp_list[brp_1].value);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
            brp_list[brp_1].control);
    if (retval != ERROR_OK)
        return retval;

    control_IVA = ((IVA_machmode & 0x7) << 20)
        | (brp_1 << 16)
        | (1 << 13)
        | (IVA_byte_addr_select << 5)
        | (3 << 1) | 1;
    brp_list[brp_2].used = 1;
    brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
    brp_list[brp_2].control = control_IVA;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
            brp_list[brp_2].value & 0xFFFFFFFF);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
            brp_list[brp_2].value >> 32);
    if (retval != ERROR_OK)
        return retval;
    retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
            + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
            brp_list[brp_2].control);
    if (retval != ERROR_OK)
        return retval;

    return ERROR_OK;
}
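
/*
 * A hybrid breakpoint links two register pairs through the
 * architecture's linked-breakpoint scheme: the context pair matches
 * the ASID and the IVA pair matches the address, with each BCR's LBN
 * field (bits[19:16], the brp_x << 16 terms above) naming its partner,
 * so the hardware only triggers when both conditions match at once.
 */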
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
    int retval;
    struct aarch64_common *aarch64 = target_to_aarch64(target);
    struct armv8_common *armv8 = &aarch64->armv8_common;
    struct aarch64_brp *brp_list = aarch64->brp_list;

    if (!breakpoint->set) {
        LOG_WARNING("breakpoint not set");
        return ERROR_OK;
    }

    if (breakpoint->type == BKPT_HARD) {
        if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
            int brp_i = breakpoint->set - 1;
            int brp_j = breakpoint->linked_BRP;
            if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
                LOG_DEBUG("Invalid BRP number in breakpoint");
                return ERROR_OK;
            }
            LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
                brp_list[brp_i].control, brp_list[brp_i].value);
            brp_list[brp_i].used = 0;
            brp_list[brp_i].value = 0;
            brp_list[brp_i].control = 0;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
                    brp_list[brp_i].control);
            if (retval != ERROR_OK)
                return retval;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
                    (uint32_t)brp_list[brp_i].value);
            if (retval != ERROR_OK)
                return retval;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
                    (uint32_t)brp_list[brp_i].value);
            if (retval != ERROR_OK)
                return retval;
            if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
                LOG_DEBUG("Invalid BRP number in breakpoint");
                return ERROR_OK;
            }
            LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
                brp_list[brp_j].control, brp_list[brp_j].value);
            brp_list[brp_j].used = 0;
            brp_list[brp_j].value = 0;
            brp_list[brp_j].control = 0;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
                    brp_list[brp_j].control);
            if (retval != ERROR_OK)
                return retval;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
                    (uint32_t)brp_list[brp_j].value);
            if (retval != ERROR_OK)
                return retval;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
                    (uint32_t)brp_list[brp_j].value);
            if (retval != ERROR_OK)
                return retval;

            breakpoint->linked_BRP = 0;
            breakpoint->set = 0;
            return ERROR_OK;

        } else {
            int brp_i = breakpoint->set - 1;
            if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
                LOG_DEBUG("Invalid BRP number in breakpoint");
                return ERROR_OK;
            }
            LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
                brp_list[brp_i].control, brp_list[brp_i].value);
            brp_list[brp_i].used = 0;
            brp_list[brp_i].value = 0;
            brp_list[brp_i].control = 0;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
                    brp_list[brp_i].control);
            if (retval != ERROR_OK)
                return retval;
            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
                    brp_list[brp_i].value);
            if (retval != ERROR_OK)
                return retval;

            retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
                    + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
                    (uint32_t)brp_list[brp_i].value);
            if (retval != ERROR_OK)
                return retval;
            breakpoint->set = 0;
            return ERROR_OK;
        }
    } else {
        /* restore original instruction (kept in target endianness) */
        if (breakpoint->length == 4) {
            retval = target_write_memory(target,
                    breakpoint->address & 0xFFFFFFFFFFFFFFFE,
                    4, 1, breakpoint->orig_instr);
            if (retval != ERROR_OK)
                return retval;
        } else {
            retval = target_write_memory(target,
                    breakpoint->address & 0xFFFFFFFFFFFFFFFE,
                    2, 1, breakpoint->orig_instr);
            if (retval != ERROR_OK)
                return retval;
        }
    }
    breakpoint->set = 0;

    return ERROR_OK;
}
static int aarch64_add_breakpoint(struct target *target,
    struct breakpoint *breakpoint)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);

    if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
        LOG_INFO("no hardware breakpoint available");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
    }

    if (breakpoint->type == BKPT_HARD)
        aarch64->brp_num_available--;

    return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
    struct breakpoint *breakpoint)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);

    if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
        LOG_INFO("no hardware breakpoint available");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
    }

    if (breakpoint->type == BKPT_HARD)
        aarch64->brp_num_available--;

    return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
    struct breakpoint *breakpoint)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);

    if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
        LOG_INFO("no hardware breakpoint available");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
    }

    if (breakpoint->type == BKPT_HARD)
        aarch64->brp_num_available--;

    return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
}
static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
    struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
    /* It is perfectly possible to remove breakpoints while the target is running */
    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }
#endif

    if (breakpoint->set) {
        aarch64_unset_breakpoint(target, breakpoint);
        if (breakpoint->type == BKPT_HARD)
            aarch64->brp_num_available++;
    }

    return ERROR_OK;
}
/*
 * AArch64 Reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
    struct armv8_common *armv8 = target_to_armv8(target);

    LOG_DEBUG(" ");

    /* FIXME when halt is requested, make it work somehow... */

    /* Issue some kind of warm reset. */
    if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
        target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
    else if (jtag_get_reset_config() & RESET_HAS_SRST) {
        /* REVISIT handle "pulls" cases, if there's
         * hardware that needs them to work.
         */
        jtag_add_reset(0, 1);
    } else {
        LOG_ERROR("%s: how to reset?", target_name(target));
        return ERROR_FAIL;
    }

    /* registers are now invalid */
    register_cache_invalidate(armv8->arm.core_cache);

    target->state = TARGET_RESET;

    return ERROR_OK;
}
static int aarch64_deassert_reset(struct target *target)
{
    int retval;

    LOG_DEBUG(" ");

    /* be certain SRST is off */
    jtag_add_reset(0, 0);

    retval = aarch64_poll(target);
    if (retval != ERROR_OK)
        return retval;

    if (target->reset_halt) {
        if (target->state != TARGET_HALTED) {
            LOG_WARNING("%s: ran after reset and before halt ...",
                target_name(target));
            retval = target_halt(target);
            if (retval != ERROR_OK)
                return retval;
        }
    }

    return ERROR_OK;
}
1789 static int aarch64_write_apb_ap_memory(struct target *target,
1790 uint64_t address, uint32_t size,
1791 uint32_t count, const uint8_t *buffer)
1793 /* write memory through APB-AP */
1794 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1795 struct armv8_common *armv8 = target_to_armv8(target);
1796 struct arm *arm = &armv8->arm;
1797 int total_bytes = count * size;
1798 int total_u32;
1799 int start_byte = address & 0x3;
1800 int end_byte = (address + total_bytes) & 0x3;
1801 struct reg *reg;
1802 uint32_t dscr;
1803 uint8_t *tmp_buff = NULL;
1805 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1806 address, size, count);
1807 if (target->state != TARGET_HALTED) {
1808 LOG_WARNING("target not halted");
1809 return ERROR_TARGET_NOT_HALTED;
1812 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1814 /* Mark register R0 as dirty, as it will be used
1815 * for transferring the data.
1816 * It will be restored automatically when exiting
1817 * debug mode
1819 reg = armv8_reg_current(arm, 1);
1820 reg->dirty = true;
1822 reg = armv8_reg_current(arm, 0);
1823 reg->dirty = true;
1825 /* clear any abort */
1826 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1827 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1828 if (retval != ERROR_OK)
1829 return retval;
1832 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1834 /* The algorithm only copies 32 bit words, so the buffer
1835 * should be expanded to include the words at either end.
1836 * The first and last words will be read first to avoid
1837 * corruption if needed.
1839 tmp_buff = malloc(total_u32 * 4);
1841 if ((start_byte != 0) && (total_u32 > 1)) {
1842 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1843 * the other bytes in the word.
1845 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1846 if (retval != ERROR_OK)
1847 goto error_free_buff_w;
1850 /* If end of write is not aligned, or the write is less than 4 bytes */
1851 if ((end_byte != 0) ||
1852 ((total_u32 == 1) && (total_bytes != 4))) {
1854 /* Read the last word to avoid corruption during 32 bit write */
1855 int mem_offset = (total_u32-1) * 4;
1856 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1857 if (retval != ERROR_OK)
1858 goto error_free_buff_w;
1861 /* Copy the write buffer over the top of the temporary buffer */
1862 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1864 /* We now have a 32 bit aligned buffer that can be written */
1866 /* Read DSCR */
1867 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1868 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1869 if (retval != ERROR_OK)
1870 goto error_free_buff_w;
1872 /* Set Normal access mode */
1873 dscr = (dscr & ~DSCR_MA);
1874 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1875 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1877 if (arm->core_state == ARM_STATE_AARCH64) {
1878 /* Write X0 with value 'address' using write procedure */
1879 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1880 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1881 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1882 retval += aarch64_exec_opcode(target,
1883 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1884 } else {
1885 /* Write R0 with value 'address' using write procedure */
1886 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1887 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1888 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1889 retval += aarch64_exec_opcode(target,
1890 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1893 /* Step 1.d - Change DCC to memory mode */
1894 dscr = dscr | DSCR_MA;
1895 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1897 if (retval != ERROR_OK)
1898 goto error_unset_dtr_w;

	/* Step 2.a   - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Step 3.a   - Switch DTR mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_w;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_w;
	}

	/* Done */
	free(tmp_buff);
	return ERROR_OK;

error_unset_dtr_w:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_w:
	LOG_ERROR("error while writing APB-AP memory");
	free(tmp_buff);
	return ERROR_FAIL;
}

static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark registers X0 and X1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c   - Copy value from DTR to X0 using instruction mrs DBGDTR_EL0, x0 */
		retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.d   - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.e   - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f   - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.d   - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.e   - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f   - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	}

	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a   - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32 - 1,
			armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a   - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b   - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32 - 1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error while reading APB-AP memory");
	free(tmp_buff);
	return ERROR_FAIL;
}

static int aarch64_read_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint8_t apsel = swjdp->apsel;
	LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
			address, size, count);

	if (count && buffer) {

		if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {

			/* read memory through AHB-AP */
			retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
		} else {
			/* read memory through APB-AP */
			retval = aarch64_mmu_modify(target, 0);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
		}
	}
	return retval;
}

static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	target_addr_t virt, phys;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint8_t apsel = swjdp->apsel;

	/* aarch64 handles unaligned memory access */
	LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
			address, size, count);

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
		if (mmu_enabled) {
			virt = address;
			retval = aarch64_virt2phys(target, virt, &phys);
			if (retval != ERROR_OK)
				return retval;

			LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
					virt, phys);
			address = phys;
		}
		retval = aarch64_read_phys_memory(target, address, size, count,
				buffer);
	} else {
		if (mmu_enabled) {
			retval = aarch64_check_address(target, address);
			if (retval != ERROR_OK)
				return retval;
			/* enable MMU as we could have disabled it for phys
			 * access */
			retval = aarch64_mmu_modify(target, 1);
			if (retval != ERROR_OK)
				return retval;
		}
		retval = aarch64_read_apb_ap_memory(target, address, size,
				count, buffer);
	}
	return retval;
}

static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
			address, size, count);

	if (count && buffer) {

		if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {

			/* write memory through AHB-AP */
			retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			retval = aarch64_mmu_modify(target, 0);
			if (retval != ERROR_OK)
				return retval;
			return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
		}
	}

	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv8->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */
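
		/* Note (editorial): the fixed 64-byte stride below matches common
		 * Cortex-A implementations; strictly, the minimum line sizes are
		 * discoverable from CTR_EL0.DminLine/IminLine, so treat 64 as an
		 * assumption rather than an architectural guarantee. */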

		/* invalidate I-Cache */
		if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
			/* IC IVAU - Invalidate instruction cache
			 * by VA to PoU
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV8_MSR_GP(SYSTEM_ICIVAU, 0),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* clean D-Cache */
		if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
			/* DC CVAU - Clean data cache line
			 * by VA to PoU
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV8_MSR_GP(SYSTEM_DCCVAU, 0),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}

static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	target_addr_t virt, phys;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint8_t apsel = swjdp->apsel;

	/* aarch64 handles unaligned memory access */
	LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
			"; count %" PRId32, address, size, count);

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
		LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
				PRId32 "; count %" PRId32, address, size, count);
		if (mmu_enabled) {
			virt = address;
			retval = aarch64_virt2phys(target, virt, &phys);
			if (retval != ERROR_OK)
				return retval;

			LOG_DEBUG("Writing to virtual address. Translating v:0x%"
					TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
			address = phys;
		}
		retval = aarch64_write_phys_memory(target, address, size,
				count, buffer);
	} else {
		if (mmu_enabled) {
			retval = aarch64_check_address(target, address);
			if (retval != ERROR_OK)
				return retval;
			/* enable MMU as we could have disabled it for phys access */
			retval = aarch64_mmu_modify(target, 1);
			if (retval != ERROR_OK)
				return retval;
		}
		retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
	}
	return retval;
}
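
/* Poll the target's DCC channel for debug messages coming from the running
 * target. This runs from the periodic timer callback registered in
 * aarch64_init_arch_info() below. */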
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}

static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AP - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	/* Search for the AHB-AP */
	armv8->memory_ap_available = false;
	retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
	if (retval == ERROR_OK) {
		retval = mem_ap_init(armv8->memory_ap);
		if (retval == ERROR_OK)
			armv8->memory_ap_available = true;
	}
	if (retval != ERROR_OK) {
		/* AHB-AP not found or unavailable - use the CPU */
		LOG_DEBUG("No AHB-AP available for memory access");
	}

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%08" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_UNLOCK, 0xC5ACCE55);
	if (retval != ERROR_OK)
		return retval;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
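	/* 'debug' mirrors ID_AA64DFR0_EL1: BRPs (bits [15:12]) and CTX_CMPs
	 * (bits [31:28]) each encode "number of pairs minus 1", hence the +1
	 * in the decodes below. */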
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		if (i < (aarch64->brp_num - aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}

static int aarch64_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = aarch64_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = aarch64_init_debug_access(target);

	return retval;
}

/*
 *	aarch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}

static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct jtag_tap *tap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *dap = armv8->arm.dap;

	armv8->arm.dap = dap;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	/* tap has no dap initialized */
	if (!tap->dap) {
		tap->dap = dap_init();

		/* Leave (only) generic DAP stuff for debugport_init() */
		tap->dap->tap = tap;
	}

	armv8->arm.dap = tap->dap;

	aarch64->fast_reg_read = 0;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;

	armv8->post_debug_entry = aarch64_post_debug_entry;

	armv8->pre_restore_context = NULL;

	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	armv8_init_arch_info(target, armv8);
	target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);

	return ERROR_OK;
}

static int aarch64_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));

	return aarch64_init_arch_info(target, aarch64, target->tap);
}

static int aarch64_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
	return ERROR_OK;
}

static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	int retval = ERROR_FAIL;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint8_t apsel = swjdp->apsel;
	if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
		uint32_t ret;
		retval = armv8_mmu_translate_va(target,
				virt, &ret);
		if (retval != ERROR_OK)
			goto done;
		*phys = ret;
	} else {
		LOG_ERROR("AArch64 processor does not support translating a virtual address to a physical address");
	}
done:
	return retval;
}

COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD_CTX,
			&armv8->armv8_mmu.armv8_cache);
}

COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}

COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check target is an smp target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != (struct target_list *)NULL) {
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}

	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		target->smp = 1;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;
		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
				target->gdb_service->core[0], target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
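
/* Editorial note: the handlers above are registered in the "cortex_a" command
 * group below, so from the OpenOCD console the expected (unverified here)
 * invocations look like "cortex_a smp_on", "cortex_a smp_off" and
 * "cortex_a smp_gdb [coreid]". */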
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display or set the current core reported to gdb",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};

static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
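
/* Example (hypothetical) target configuration using this driver, assuming a
 * TAP named $_CHIPNAME.cpu has already been declared in the board/target
 * script:
 *
 *   target create $_CHIPNAME.core0 aarch64 -chain-position $_CHIPNAME.cpu
 *   $_CHIPNAME.core0 configure -event reset-init { cortex_a dbginit }
 */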
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};