/* target: aarch64: Adding mcr, mrc 32-bit coprocessor read/write support
 * openocd: src/target/aarch64.c (blob df1e49c21c03263349669fd9cca8046e9f8f2508)
 */
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.                                        *
 *                                                                         *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include <helper/time_support.h>
/* How a restart (resume) request is carried out */
enum restart_mode {
	RESTART_LAZY,	/* trigger the restart, do not wait for completion */
	RESTART_SYNC,	/* trigger the restart and wait until the PE resumed */
};
/* How a halt request is carried out */
enum halt_mode {
	HALT_LAZY,	/* trigger the halt, do not wait for completion */
	HALT_SYNC,	/* trigger the halt and wait until the PE halted */
};
43 struct aarch64_private_config {
44 struct adiv5_private_config adiv5_config;
45 struct arm_cti *cti;
48 static int aarch64_poll(struct target *target);
49 static int aarch64_debug_entry(struct target *target);
50 static int aarch64_restore_context(struct target *target, bool bpwp);
51 static int aarch64_set_breakpoint(struct target *target,
52 struct breakpoint *breakpoint, uint8_t matchmode);
53 static int aarch64_set_context_breakpoint(struct target *target,
54 struct breakpoint *breakpoint, uint8_t matchmode);
55 static int aarch64_set_hybrid_breakpoint(struct target *target,
56 struct breakpoint *breakpoint);
57 static int aarch64_unset_breakpoint(struct target *target,
58 struct breakpoint *breakpoint);
59 static int aarch64_mmu(struct target *target, int *enabled);
60 static int aarch64_virt2phys(struct target *target,
61 target_addr_t virt, target_addr_t *phys);
62 static int aarch64_read_cpu_memory(struct target *target,
63 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
/* Iterate over every member of an SMP target group list */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
68 static int aarch64_restore_system_control_reg(struct target *target)
70 enum arm_mode target_mode = ARM_MODE_ANY;
71 int retval = ERROR_OK;
72 uint32_t instr;
74 struct aarch64_common *aarch64 = target_to_aarch64(target);
75 struct armv8_common *armv8 = target_to_armv8(target);
77 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
78 aarch64->system_control_reg_curr = aarch64->system_control_reg;
79 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81 switch (armv8->arm.core_mode) {
82 case ARMV8_64_EL0T:
83 target_mode = ARMV8_64_EL1H;
84 /* fall through */
85 case ARMV8_64_EL1T:
86 case ARMV8_64_EL1H:
87 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
88 break;
89 case ARMV8_64_EL2T:
90 case ARMV8_64_EL2H:
91 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
92 break;
93 case ARMV8_64_EL3H:
94 case ARMV8_64_EL3T:
95 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
96 break;
98 case ARM_MODE_SVC:
99 case ARM_MODE_ABT:
100 case ARM_MODE_FIQ:
101 case ARM_MODE_IRQ:
102 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
103 break;
105 default:
106 LOG_INFO("cannot read system control register in this mode");
107 return ERROR_FAIL;
110 if (target_mode != ARM_MODE_ANY)
111 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
113 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
114 if (retval != ERROR_OK)
115 return retval;
117 if (target_mode != ARM_MODE_ANY)
118 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
121 return retval;
124 /* modify system_control_reg in order to enable or disable mmu for :
125 * - virt2phys address conversion
126 * - read or write memory in phys or virt address */
127 static int aarch64_mmu_modify(struct target *target, int enable)
129 struct aarch64_common *aarch64 = target_to_aarch64(target);
130 struct armv8_common *armv8 = &aarch64->armv8_common;
131 int retval = ERROR_OK;
132 uint32_t instr = 0;
134 if (enable) {
135 /* if mmu enabled at target stop and mmu not enable */
136 if (!(aarch64->system_control_reg & 0x1U)) {
137 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
138 return ERROR_FAIL;
140 if (!(aarch64->system_control_reg_curr & 0x1U))
141 aarch64->system_control_reg_curr |= 0x1U;
142 } else {
143 if (aarch64->system_control_reg_curr & 0x4U) {
144 /* data cache is active */
145 aarch64->system_control_reg_curr &= ~0x4U;
146 /* flush data cache armv8 function to be called */
147 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
148 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
150 if ((aarch64->system_control_reg_curr & 0x1U)) {
151 aarch64->system_control_reg_curr &= ~0x1U;
155 switch (armv8->arm.core_mode) {
156 case ARMV8_64_EL0T:
157 case ARMV8_64_EL1T:
158 case ARMV8_64_EL1H:
159 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
160 break;
161 case ARMV8_64_EL2T:
162 case ARMV8_64_EL2H:
163 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
164 break;
165 case ARMV8_64_EL3H:
166 case ARMV8_64_EL3T:
167 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
168 break;
170 case ARM_MODE_SVC:
171 case ARM_MODE_ABT:
172 case ARM_MODE_FIQ:
173 case ARM_MODE_IRQ:
174 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
175 break;
177 default:
178 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
179 break;
182 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
183 aarch64->system_control_reg_curr);
184 return retval;
188 * Basic debug access, very low level assumes state is saved
190 static int aarch64_init_debug_access(struct target *target)
192 struct armv8_common *armv8 = target_to_armv8(target);
193 int retval;
194 uint32_t dummy;
196 LOG_DEBUG("%s", target_name(target));
198 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
199 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
200 if (retval != ERROR_OK) {
201 LOG_DEBUG("Examine %s failed", "oslock");
202 return retval;
205 /* Clear Sticky Power Down status Bit in PRSR to enable access to
206 the registers in the Core Power Domain */
207 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
208 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
209 if (retval != ERROR_OK)
210 return retval;
213 * Static CTI configuration:
214 * Channel 0 -> trigger outputs HALT request to PE
215 * Channel 1 -> trigger outputs Resume request to PE
216 * Gate all channel trigger events from entering the CTM
219 /* Enable CTI */
220 retval = arm_cti_enable(armv8->cti, true);
221 /* By default, gate all channel events to and from the CTM */
222 if (retval == ERROR_OK)
223 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
224 /* output halt requests to PE on channel 0 event */
225 if (retval == ERROR_OK)
226 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
227 /* output restart requests to PE on channel 1 event */
228 if (retval == ERROR_OK)
229 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
230 if (retval != ERROR_OK)
231 return retval;
233 /* Resync breakpoint registers */
235 return ERROR_OK;
238 /* Write to memory mapped registers directly with no cache or mmu handling */
239 static int aarch64_dap_write_memap_register_u32(struct target *target,
240 uint32_t address,
241 uint32_t value)
243 int retval;
244 struct armv8_common *armv8 = target_to_armv8(target);
246 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
248 return retval;
251 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
253 struct arm_dpm *dpm = &a8->armv8_common.dpm;
254 int retval;
256 dpm->arm = &a8->armv8_common.arm;
257 dpm->didr = debug;
259 retval = armv8_dpm_setup(dpm);
260 if (retval == ERROR_OK)
261 retval = armv8_dpm_initialize(dpm);
263 return retval;
266 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
268 struct armv8_common *armv8 = target_to_armv8(target);
269 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
272 static int aarch64_check_state_one(struct target *target,
273 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
275 struct armv8_common *armv8 = target_to_armv8(target);
276 uint32_t prsr;
277 int retval;
279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
281 if (retval != ERROR_OK)
282 return retval;
284 if (p_prsr)
285 *p_prsr = prsr;
287 if (p_result)
288 *p_result = (prsr & mask) == (val & mask);
290 return ERROR_OK;
293 static int aarch64_wait_halt_one(struct target *target)
295 int retval = ERROR_OK;
296 uint32_t prsr;
298 int64_t then = timeval_ms();
299 for (;;) {
300 int halted;
302 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
303 if (retval != ERROR_OK || halted)
304 break;
306 if (timeval_ms() > then + 1000) {
307 retval = ERROR_TARGET_TIMEOUT;
308 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
309 break;
312 return retval;
315 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
317 int retval = ERROR_OK;
318 struct target_list *head = target->head;
319 struct target *first = NULL;
321 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
323 while (head != NULL) {
324 struct target *curr = head->target;
325 struct armv8_common *armv8 = target_to_armv8(curr);
326 head = head->next;
328 if (exc_target && curr == target)
329 continue;
330 if (!target_was_examined(curr))
331 continue;
332 if (curr->state != TARGET_RUNNING)
333 continue;
335 /* HACK: mark this target as prepared for halting */
336 curr->debug_reason = DBG_REASON_DBGRQ;
338 /* open the gate for channel 0 to let HALT requests pass to the CTM */
339 retval = arm_cti_ungate_channel(armv8->cti, 0);
340 if (retval == ERROR_OK)
341 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
342 if (retval != ERROR_OK)
343 break;
345 LOG_DEBUG("target %s prepared", target_name(curr));
347 if (first == NULL)
348 first = curr;
351 if (p_first) {
352 if (exc_target && first)
353 *p_first = first;
354 else
355 *p_first = target;
358 return retval;
361 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
363 int retval = ERROR_OK;
364 struct armv8_common *armv8 = target_to_armv8(target);
366 LOG_DEBUG("%s", target_name(target));
368 /* allow Halting Debug Mode */
369 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
370 if (retval != ERROR_OK)
371 return retval;
373 /* trigger an event on channel 0, this outputs a halt request to the PE */
374 retval = arm_cti_pulse_channel(armv8->cti, 0);
375 if (retval != ERROR_OK)
376 return retval;
378 if (mode == HALT_SYNC) {
379 retval = aarch64_wait_halt_one(target);
380 if (retval != ERROR_OK) {
381 if (retval == ERROR_TARGET_TIMEOUT)
382 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
383 return retval;
387 return ERROR_OK;
390 static int aarch64_halt_smp(struct target *target, bool exc_target)
392 struct target *next = target;
393 int retval;
395 /* prepare halt on all PEs of the group */
396 retval = aarch64_prepare_halt_smp(target, exc_target, &next);
398 if (exc_target && next == target)
399 return retval;
401 /* halt the target PE */
402 if (retval == ERROR_OK)
403 retval = aarch64_halt_one(next, HALT_LAZY);
405 if (retval != ERROR_OK)
406 return retval;
408 /* wait for all PEs to halt */
409 int64_t then = timeval_ms();
410 for (;;) {
411 bool all_halted = true;
412 struct target_list *head;
413 struct target *curr;
415 foreach_smp_target(head, target->head) {
416 int halted;
418 curr = head->target;
420 if (!target_was_examined(curr))
421 continue;
423 retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
424 if (retval != ERROR_OK || !halted) {
425 all_halted = false;
426 break;
430 if (all_halted)
431 break;
433 if (timeval_ms() > then + 1000) {
434 retval = ERROR_TARGET_TIMEOUT;
435 break;
439 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
440 * and it looks like the CTI's are not connected by a common
441 * trigger matrix. It seems that we need to halt one core in each
442 * cluster explicitly. So if we find that a core has not halted
443 * yet, we trigger an explicit halt for the second cluster.
445 retval = aarch64_halt_one(curr, HALT_LAZY);
446 if (retval != ERROR_OK)
447 break;
450 return retval;
453 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
455 struct target *gdb_target = NULL;
456 struct target_list *head;
457 struct target *curr;
459 if (debug_reason == DBG_REASON_NOTHALTED) {
460 LOG_DEBUG("Halting remaining targets in SMP group");
461 aarch64_halt_smp(target, true);
464 /* poll all targets in the group, but skip the target that serves GDB */
465 foreach_smp_target(head, target->head) {
466 curr = head->target;
467 /* skip calling context */
468 if (curr == target)
469 continue;
470 if (!target_was_examined(curr))
471 continue;
472 /* skip targets that were already halted */
473 if (curr->state == TARGET_HALTED)
474 continue;
475 /* remember the gdb_service->target */
476 if (curr->gdb_service != NULL)
477 gdb_target = curr->gdb_service->target;
478 /* skip it */
479 if (curr == gdb_target)
480 continue;
482 /* avoid recursion in aarch64_poll() */
483 curr->smp = 0;
484 aarch64_poll(curr);
485 curr->smp = 1;
488 /* after all targets were updated, poll the gdb serving target */
489 if (gdb_target != NULL && gdb_target != target)
490 aarch64_poll(gdb_target);
492 return ERROR_OK;
496 * Aarch64 Run control
499 static int aarch64_poll(struct target *target)
501 enum target_state prev_target_state;
502 int retval = ERROR_OK;
503 int halted;
505 retval = aarch64_check_state_one(target,
506 PRSR_HALT, PRSR_HALT, &halted, NULL);
507 if (retval != ERROR_OK)
508 return retval;
510 if (halted) {
511 prev_target_state = target->state;
512 if (prev_target_state != TARGET_HALTED) {
513 enum target_debug_reason debug_reason = target->debug_reason;
515 /* We have a halting debug event */
516 target->state = TARGET_HALTED;
517 LOG_DEBUG("Target %s halted", target_name(target));
518 retval = aarch64_debug_entry(target);
519 if (retval != ERROR_OK)
520 return retval;
522 if (target->smp)
523 update_halt_gdb(target, debug_reason);
525 switch (prev_target_state) {
526 case TARGET_RUNNING:
527 case TARGET_UNKNOWN:
528 case TARGET_RESET:
529 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
530 break;
531 case TARGET_DEBUG_RUNNING:
532 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
533 break;
534 default:
535 break;
538 } else
539 target->state = TARGET_RUNNING;
541 return retval;
544 static int aarch64_halt(struct target *target)
546 if (target->smp)
547 return aarch64_halt_smp(target, false);
549 return aarch64_halt_one(target, HALT_SYNC);
552 static int aarch64_restore_one(struct target *target, int current,
553 uint64_t *address, int handle_breakpoints, int debug_execution)
555 struct armv8_common *armv8 = target_to_armv8(target);
556 struct arm *arm = &armv8->arm;
557 int retval;
558 uint64_t resume_pc;
560 LOG_DEBUG("%s", target_name(target));
562 if (!debug_execution)
563 target_free_all_working_areas(target);
565 /* current = 1: continue on current pc, otherwise continue at <address> */
566 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
567 if (!current)
568 resume_pc = *address;
569 else
570 *address = resume_pc;
572 /* Make sure that the Armv7 gdb thumb fixups does not
573 * kill the return address
575 switch (arm->core_state) {
576 case ARM_STATE_ARM:
577 resume_pc &= 0xFFFFFFFC;
578 break;
579 case ARM_STATE_AARCH64:
580 resume_pc &= 0xFFFFFFFFFFFFFFFC;
581 break;
582 case ARM_STATE_THUMB:
583 case ARM_STATE_THUMB_EE:
584 /* When the return address is loaded into PC
585 * bit 0 must be 1 to stay in Thumb state
587 resume_pc |= 0x1;
588 break;
589 case ARM_STATE_JAZELLE:
590 LOG_ERROR("How do I resume into Jazelle state??");
591 return ERROR_FAIL;
593 LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
594 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
595 arm->pc->dirty = 1;
596 arm->pc->valid = 1;
598 /* called it now before restoring context because it uses cpu
599 * register r0 for restoring system control register */
600 retval = aarch64_restore_system_control_reg(target);
601 if (retval == ERROR_OK)
602 retval = aarch64_restore_context(target, handle_breakpoints);
604 return retval;
608 * prepare single target for restart
612 static int aarch64_prepare_restart_one(struct target *target)
614 struct armv8_common *armv8 = target_to_armv8(target);
615 int retval;
616 uint32_t dscr;
617 uint32_t tmp;
619 LOG_DEBUG("%s", target_name(target));
621 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
622 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
623 if (retval != ERROR_OK)
624 return retval;
626 if ((dscr & DSCR_ITE) == 0)
627 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
628 if ((dscr & DSCR_ERR) != 0)
629 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
631 /* acknowledge a pending CTI halt event */
632 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
634 * open the CTI gate for channel 1 so that the restart events
635 * get passed along to all PEs. Also close gate for channel 0
636 * to isolate the PE from halt events.
638 if (retval == ERROR_OK)
639 retval = arm_cti_ungate_channel(armv8->cti, 1);
640 if (retval == ERROR_OK)
641 retval = arm_cti_gate_channel(armv8->cti, 0);
643 /* make sure that DSCR.HDE is set */
644 if (retval == ERROR_OK) {
645 dscr |= DSCR_HDE;
646 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
647 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
650 if (retval == ERROR_OK) {
651 /* clear sticky bits in PRSR, SDR is now 0 */
652 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
653 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
656 return retval;
659 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
661 struct armv8_common *armv8 = target_to_armv8(target);
662 int retval;
664 LOG_DEBUG("%s", target_name(target));
666 /* trigger an event on channel 1, generates a restart request to the PE */
667 retval = arm_cti_pulse_channel(armv8->cti, 1);
668 if (retval != ERROR_OK)
669 return retval;
671 if (mode == RESTART_SYNC) {
672 int64_t then = timeval_ms();
673 for (;;) {
674 int resumed;
676 * if PRSR.SDR is set now, the target did restart, even
677 * if it's now already halted again (e.g. due to breakpoint)
679 retval = aarch64_check_state_one(target,
680 PRSR_SDR, PRSR_SDR, &resumed, NULL);
681 if (retval != ERROR_OK || resumed)
682 break;
684 if (timeval_ms() > then + 1000) {
685 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
686 retval = ERROR_TARGET_TIMEOUT;
687 break;
692 if (retval != ERROR_OK)
693 return retval;
695 target->debug_reason = DBG_REASON_NOTHALTED;
696 target->state = TARGET_RUNNING;
698 return ERROR_OK;
701 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
703 int retval;
705 LOG_DEBUG("%s", target_name(target));
707 retval = aarch64_prepare_restart_one(target);
708 if (retval == ERROR_OK)
709 retval = aarch64_do_restart_one(target, mode);
711 return retval;
715 * prepare all but the current target for restart
717 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
719 int retval = ERROR_OK;
720 struct target_list *head;
721 struct target *first = NULL;
722 uint64_t address;
724 foreach_smp_target(head, target->head) {
725 struct target *curr = head->target;
727 /* skip calling target */
728 if (curr == target)
729 continue;
730 if (!target_was_examined(curr))
731 continue;
732 if (curr->state != TARGET_HALTED)
733 continue;
735 /* resume at current address, not in step mode */
736 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
737 if (retval == ERROR_OK)
738 retval = aarch64_prepare_restart_one(curr);
739 if (retval != ERROR_OK) {
740 LOG_ERROR("failed to restore target %s", target_name(curr));
741 break;
743 /* remember the first valid target in the group */
744 if (first == NULL)
745 first = curr;
748 if (p_first)
749 *p_first = first;
751 return retval;
755 static int aarch64_step_restart_smp(struct target *target)
757 int retval = ERROR_OK;
758 struct target_list *head;
759 struct target *first = NULL;
761 LOG_DEBUG("%s", target_name(target));
763 retval = aarch64_prep_restart_smp(target, 0, &first);
764 if (retval != ERROR_OK)
765 return retval;
767 if (first != NULL)
768 retval = aarch64_do_restart_one(first, RESTART_LAZY);
769 if (retval != ERROR_OK) {
770 LOG_DEBUG("error restarting target %s", target_name(first));
771 return retval;
774 int64_t then = timeval_ms();
775 for (;;) {
776 struct target *curr = target;
777 bool all_resumed = true;
779 foreach_smp_target(head, target->head) {
780 uint32_t prsr;
781 int resumed;
783 curr = head->target;
785 if (curr == target)
786 continue;
788 if (!target_was_examined(curr))
789 continue;
791 retval = aarch64_check_state_one(curr,
792 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
793 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
794 all_resumed = false;
795 break;
798 if (curr->state != TARGET_RUNNING) {
799 curr->state = TARGET_RUNNING;
800 curr->debug_reason = DBG_REASON_NOTHALTED;
801 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
805 if (all_resumed)
806 break;
808 if (timeval_ms() > then + 1000) {
809 LOG_ERROR("%s: timeout waiting for target resume", __func__);
810 retval = ERROR_TARGET_TIMEOUT;
811 break;
814 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
815 * and it looks like the CTI's are not connected by a common
816 * trigger matrix. It seems that we need to halt one core in each
817 * cluster explicitly. So if we find that a core has not halted
818 * yet, we trigger an explicit resume for the second cluster.
820 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
821 if (retval != ERROR_OK)
822 break;
825 return retval;
828 static int aarch64_resume(struct target *target, int current,
829 target_addr_t address, int handle_breakpoints, int debug_execution)
831 int retval = 0;
832 uint64_t addr = address;
834 if (target->state != TARGET_HALTED)
835 return ERROR_TARGET_NOT_HALTED;
838 * If this target is part of a SMP group, prepare the others
839 * targets for resuming. This involves restoring the complete
840 * target register context and setting up CTI gates to accept
841 * resume events from the trigger matrix.
843 if (target->smp) {
844 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
845 if (retval != ERROR_OK)
846 return retval;
849 /* all targets prepared, restore and restart the current target */
850 retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
851 debug_execution);
852 if (retval == ERROR_OK)
853 retval = aarch64_restart_one(target, RESTART_SYNC);
854 if (retval != ERROR_OK)
855 return retval;
857 if (target->smp) {
858 int64_t then = timeval_ms();
859 for (;;) {
860 struct target *curr = target;
861 struct target_list *head;
862 bool all_resumed = true;
864 foreach_smp_target(head, target->head) {
865 uint32_t prsr;
866 int resumed;
868 curr = head->target;
869 if (curr == target)
870 continue;
871 if (!target_was_examined(curr))
872 continue;
874 retval = aarch64_check_state_one(curr,
875 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
876 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
877 all_resumed = false;
878 break;
881 if (curr->state != TARGET_RUNNING) {
882 curr->state = TARGET_RUNNING;
883 curr->debug_reason = DBG_REASON_NOTHALTED;
884 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
888 if (all_resumed)
889 break;
891 if (timeval_ms() > then + 1000) {
892 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
893 retval = ERROR_TARGET_TIMEOUT;
894 break;
898 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
899 * and it looks like the CTI's are not connected by a common
900 * trigger matrix. It seems that we need to halt one core in each
901 * cluster explicitly. So if we find that a core has not halted
902 * yet, we trigger an explicit resume for the second cluster.
904 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
905 if (retval != ERROR_OK)
906 break;
910 if (retval != ERROR_OK)
911 return retval;
913 target->debug_reason = DBG_REASON_NOTHALTED;
915 if (!debug_execution) {
916 target->state = TARGET_RUNNING;
917 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
918 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
919 } else {
920 target->state = TARGET_DEBUG_RUNNING;
921 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
922 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
925 return ERROR_OK;
928 static int aarch64_debug_entry(struct target *target)
930 int retval = ERROR_OK;
931 struct armv8_common *armv8 = target_to_armv8(target);
932 struct arm_dpm *dpm = &armv8->dpm;
933 enum arm_state core_state;
934 uint32_t dscr;
936 /* make sure to clear all sticky errors */
937 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
938 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
939 if (retval == ERROR_OK)
940 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
941 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
942 if (retval == ERROR_OK)
943 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
945 if (retval != ERROR_OK)
946 return retval;
948 LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
950 dpm->dscr = dscr;
951 core_state = armv8_dpm_get_core_state(dpm);
952 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
953 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
955 /* close the CTI gate for all events */
956 if (retval == ERROR_OK)
957 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
958 /* discard async exceptions */
959 if (retval == ERROR_OK)
960 retval = dpm->instr_cpsr_sync(dpm);
961 if (retval != ERROR_OK)
962 return retval;
964 /* Examine debug reason */
965 armv8_dpm_report_dscr(dpm, dscr);
967 /* save address of instruction that triggered the watchpoint? */
968 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
969 uint32_t tmp;
970 uint64_t wfar = 0;
972 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
973 armv8->debug_base + CPUV8_DBG_WFAR1,
974 &tmp);
975 if (retval != ERROR_OK)
976 return retval;
977 wfar = tmp;
978 wfar = (wfar << 32);
979 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
980 armv8->debug_base + CPUV8_DBG_WFAR0,
981 &tmp);
982 if (retval != ERROR_OK)
983 return retval;
984 wfar |= tmp;
985 armv8_dpm_report_wfar(&armv8->dpm, wfar);
988 retval = armv8_dpm_read_current_registers(&armv8->dpm);
990 if (retval == ERROR_OK && armv8->post_debug_entry)
991 retval = armv8->post_debug_entry(target);
993 return retval;
996 static int aarch64_post_debug_entry(struct target *target)
998 struct aarch64_common *aarch64 = target_to_aarch64(target);
999 struct armv8_common *armv8 = &aarch64->armv8_common;
1000 int retval;
1001 enum arm_mode target_mode = ARM_MODE_ANY;
1002 uint32_t instr;
1004 switch (armv8->arm.core_mode) {
1005 case ARMV8_64_EL0T:
1006 target_mode = ARMV8_64_EL1H;
1007 /* fall through */
1008 case ARMV8_64_EL1T:
1009 case ARMV8_64_EL1H:
1010 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1011 break;
1012 case ARMV8_64_EL2T:
1013 case ARMV8_64_EL2H:
1014 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1015 break;
1016 case ARMV8_64_EL3H:
1017 case ARMV8_64_EL3T:
1018 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1019 break;
1021 case ARM_MODE_SVC:
1022 case ARM_MODE_ABT:
1023 case ARM_MODE_FIQ:
1024 case ARM_MODE_IRQ:
1025 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1026 break;
1028 default:
1029 LOG_INFO("cannot read system control register in this mode");
1030 return ERROR_FAIL;
1033 if (target_mode != ARM_MODE_ANY)
1034 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1036 retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1037 if (retval != ERROR_OK)
1038 return retval;
1040 if (target_mode != ARM_MODE_ANY)
1041 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1043 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1044 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1046 if (armv8->armv8_mmu.armv8_cache.info == -1) {
1047 armv8_identify_cache(armv8);
1048 armv8_read_mpidr(armv8);
1051 armv8->armv8_mmu.mmu_enabled =
1052 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1053 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1054 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1055 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1056 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1057 return ERROR_OK;
1061 * single-step a target
1063 static int aarch64_step(struct target *target, int current, target_addr_t address,
1064 int handle_breakpoints)
1066 struct armv8_common *armv8 = target_to_armv8(target);
1067 struct aarch64_common *aarch64 = target_to_aarch64(target);
1068 int saved_retval = ERROR_OK;
1069 int retval;
1070 uint32_t edecr;
1072 if (target->state != TARGET_HALTED) {
1073 LOG_WARNING("target not halted");
1074 return ERROR_TARGET_NOT_HALTED;
1077 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1078 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1079 /* make sure EDECR.SS is not set when restoring the register */
1081 if (retval == ERROR_OK) {
1082 edecr &= ~0x4;
1083 /* set EDECR.SS to enter hardware step mode */
1084 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1085 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1087 /* disable interrupts while stepping */
1088 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1089 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1090 /* bail out if stepping setup has failed */
1091 if (retval != ERROR_OK)
1092 return retval;
1094 if (target->smp && (current == 1)) {
1096 * isolate current target so that it doesn't get resumed
1097 * together with the others
1099 retval = arm_cti_gate_channel(armv8->cti, 1);
1100 /* resume all other targets in the group */
1101 if (retval == ERROR_OK)
1102 retval = aarch64_step_restart_smp(target);
1103 if (retval != ERROR_OK) {
1104 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1105 return retval;
1107 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1110 /* all other targets running, restore and restart the current target */
1111 retval = aarch64_restore_one(target, current, &address, 0, 0);
1112 if (retval == ERROR_OK)
1113 retval = aarch64_restart_one(target, RESTART_LAZY);
1115 if (retval != ERROR_OK)
1116 return retval;
1118 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1119 if (!handle_breakpoints)
1120 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1122 int64_t then = timeval_ms();
1123 for (;;) {
1124 int stepped;
1125 uint32_t prsr;
1127 retval = aarch64_check_state_one(target,
1128 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1129 if (retval != ERROR_OK || stepped)
1130 break;
1132 if (timeval_ms() > then + 100) {
1133 LOG_ERROR("timeout waiting for target %s halt after step",
1134 target_name(target));
1135 retval = ERROR_TARGET_TIMEOUT;
1136 break;
1141 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1142 * causes a timeout. The core takes the step but doesn't complete it and so
1143 * debug state is never entered. However, you can manually halt the core
1144 * as an external debug even is also a WFI wakeup event.
1146 if (retval == ERROR_TARGET_TIMEOUT)
1147 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1149 /* restore EDECR */
1150 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1151 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1152 if (retval != ERROR_OK)
1153 return retval;
1155 /* restore interrupts */
1156 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1157 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1158 if (retval != ERROR_OK)
1159 return ERROR_OK;
1162 if (saved_retval != ERROR_OK)
1163 return saved_retval;
1165 return aarch64_poll(target);
1168 static int aarch64_restore_context(struct target *target, bool bpwp)
1170 struct armv8_common *armv8 = target_to_armv8(target);
1171 struct arm *arm = &armv8->arm;
1173 int retval;
1175 LOG_DEBUG("%s", target_name(target));
1177 if (armv8->pre_restore_context)
1178 armv8->pre_restore_context(target);
1180 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1181 if (retval == ERROR_OK) {
1182 /* registers are now invalid */
1183 register_cache_invalidate(arm->core_cache);
1184 register_cache_invalidate(arm->core_cache->next);
1187 return retval;
 * AArch64 breakpoint and watchpoint functions
1194 /* Setup hardware Breakpoint Register Pair */
1195 static int aarch64_set_breakpoint(struct target *target,
1196 struct breakpoint *breakpoint, uint8_t matchmode)
1198 int retval;
1199 int brp_i = 0;
1200 uint32_t control;
1201 uint8_t byte_addr_select = 0x0F;
1202 struct aarch64_common *aarch64 = target_to_aarch64(target);
1203 struct armv8_common *armv8 = &aarch64->armv8_common;
1204 struct aarch64_brp *brp_list = aarch64->brp_list;
1206 if (breakpoint->set) {
1207 LOG_WARNING("breakpoint already set");
1208 return ERROR_OK;
1211 if (breakpoint->type == BKPT_HARD) {
1212 int64_t bpt_value;
1213 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1214 brp_i++;
1215 if (brp_i >= aarch64->brp_num) {
1216 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1217 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1219 breakpoint->set = brp_i + 1;
1220 if (breakpoint->length == 2)
1221 byte_addr_select = (3 << (breakpoint->address & 0x02));
1222 control = ((matchmode & 0x7) << 20)
1223 | (1 << 13)
1224 | (byte_addr_select << 5)
1225 | (3 << 1) | 1;
1226 brp_list[brp_i].used = 1;
1227 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1228 brp_list[brp_i].control = control;
1229 bpt_value = brp_list[brp_i].value;
1231 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1232 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1233 (uint32_t)(bpt_value & 0xFFFFFFFF));
1234 if (retval != ERROR_OK)
1235 return retval;
1236 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1237 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1238 (uint32_t)(bpt_value >> 32));
1239 if (retval != ERROR_OK)
1240 return retval;
1242 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1243 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1244 brp_list[brp_i].control);
1245 if (retval != ERROR_OK)
1246 return retval;
1247 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1248 brp_list[brp_i].control,
1249 brp_list[brp_i].value);
1251 } else if (breakpoint->type == BKPT_SOFT) {
1252 uint8_t code[4];
1254 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1255 retval = target_read_memory(target,
1256 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1257 breakpoint->length, 1,
1258 breakpoint->orig_instr);
1259 if (retval != ERROR_OK)
1260 return retval;
1262 armv8_cache_d_inner_flush_virt(armv8,
1263 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1264 breakpoint->length);
1266 retval = target_write_memory(target,
1267 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1268 breakpoint->length, 1, code);
1269 if (retval != ERROR_OK)
1270 return retval;
1272 armv8_cache_d_inner_flush_virt(armv8,
1273 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1274 breakpoint->length);
1276 armv8_cache_i_inner_inval_virt(armv8,
1277 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1278 breakpoint->length);
1280 breakpoint->set = 0x11; /* Any nice value but 0 */
1283 /* Ensure that halting debug mode is enable */
1284 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1285 if (retval != ERROR_OK) {
1286 LOG_DEBUG("Failed to set DSCR.HDE");
1287 return retval;
1290 return ERROR_OK;
1293 static int aarch64_set_context_breakpoint(struct target *target,
1294 struct breakpoint *breakpoint, uint8_t matchmode)
1296 int retval = ERROR_FAIL;
1297 int brp_i = 0;
1298 uint32_t control;
1299 uint8_t byte_addr_select = 0x0F;
1300 struct aarch64_common *aarch64 = target_to_aarch64(target);
1301 struct armv8_common *armv8 = &aarch64->armv8_common;
1302 struct aarch64_brp *brp_list = aarch64->brp_list;
1304 if (breakpoint->set) {
1305 LOG_WARNING("breakpoint already set");
1306 return retval;
1308 /*check available context BRPs*/
1309 while ((brp_list[brp_i].used ||
1310 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1311 brp_i++;
1313 if (brp_i >= aarch64->brp_num) {
1314 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1315 return ERROR_FAIL;
1318 breakpoint->set = brp_i + 1;
1319 control = ((matchmode & 0x7) << 20)
1320 | (1 << 13)
1321 | (byte_addr_select << 5)
1322 | (3 << 1) | 1;
1323 brp_list[brp_i].used = 1;
1324 brp_list[brp_i].value = (breakpoint->asid);
1325 brp_list[brp_i].control = control;
1326 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1327 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1328 brp_list[brp_i].value);
1329 if (retval != ERROR_OK)
1330 return retval;
1331 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1332 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1333 brp_list[brp_i].control);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1337 brp_list[brp_i].control,
1338 brp_list[brp_i].value);
1339 return ERROR_OK;
1343 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1345 int retval = ERROR_FAIL;
1346 int brp_1 = 0; /* holds the contextID pair */
1347 int brp_2 = 0; /* holds the IVA pair */
1348 uint32_t control_CTX, control_IVA;
1349 uint8_t CTX_byte_addr_select = 0x0F;
1350 uint8_t IVA_byte_addr_select = 0x0F;
1351 uint8_t CTX_machmode = 0x03;
1352 uint8_t IVA_machmode = 0x01;
1353 struct aarch64_common *aarch64 = target_to_aarch64(target);
1354 struct armv8_common *armv8 = &aarch64->armv8_common;
1355 struct aarch64_brp *brp_list = aarch64->brp_list;
1357 if (breakpoint->set) {
1358 LOG_WARNING("breakpoint already set");
1359 return retval;
1361 /*check available context BRPs*/
1362 while ((brp_list[brp_1].used ||
1363 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1364 brp_1++;
1366 printf("brp(CTX) found num: %d\n", brp_1);
1367 if (brp_1 >= aarch64->brp_num) {
1368 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1369 return ERROR_FAIL;
1372 while ((brp_list[brp_2].used ||
1373 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1374 brp_2++;
1376 printf("brp(IVA) found num: %d\n", brp_2);
1377 if (brp_2 >= aarch64->brp_num) {
1378 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1379 return ERROR_FAIL;
1382 breakpoint->set = brp_1 + 1;
1383 breakpoint->linked_BRP = brp_2;
1384 control_CTX = ((CTX_machmode & 0x7) << 20)
1385 | (brp_2 << 16)
1386 | (0 << 14)
1387 | (CTX_byte_addr_select << 5)
1388 | (3 << 1) | 1;
1389 brp_list[brp_1].used = 1;
1390 brp_list[brp_1].value = (breakpoint->asid);
1391 brp_list[brp_1].control = control_CTX;
1392 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1393 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1394 brp_list[brp_1].value);
1395 if (retval != ERROR_OK)
1396 return retval;
1397 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1398 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1399 brp_list[brp_1].control);
1400 if (retval != ERROR_OK)
1401 return retval;
1403 control_IVA = ((IVA_machmode & 0x7) << 20)
1404 | (brp_1 << 16)
1405 | (1 << 13)
1406 | (IVA_byte_addr_select << 5)
1407 | (3 << 1) | 1;
1408 brp_list[brp_2].used = 1;
1409 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1410 brp_list[brp_2].control = control_IVA;
1411 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1412 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1413 brp_list[brp_2].value & 0xFFFFFFFF);
1414 if (retval != ERROR_OK)
1415 return retval;
1416 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1417 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1418 brp_list[brp_2].value >> 32);
1419 if (retval != ERROR_OK)
1420 return retval;
1421 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1422 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1423 brp_list[brp_2].control);
1424 if (retval != ERROR_OK)
1425 return retval;
1427 return ERROR_OK;
1430 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1432 int retval;
1433 struct aarch64_common *aarch64 = target_to_aarch64(target);
1434 struct armv8_common *armv8 = &aarch64->armv8_common;
1435 struct aarch64_brp *brp_list = aarch64->brp_list;
1437 if (!breakpoint->set) {
1438 LOG_WARNING("breakpoint not set");
1439 return ERROR_OK;
1442 if (breakpoint->type == BKPT_HARD) {
1443 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1444 int brp_i = breakpoint->set - 1;
1445 int brp_j = breakpoint->linked_BRP;
1446 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1447 LOG_DEBUG("Invalid BRP number in breakpoint");
1448 return ERROR_OK;
1450 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1451 brp_list[brp_i].control, brp_list[brp_i].value);
1452 brp_list[brp_i].used = 0;
1453 brp_list[brp_i].value = 0;
1454 brp_list[brp_i].control = 0;
1455 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1456 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1457 brp_list[brp_i].control);
1458 if (retval != ERROR_OK)
1459 return retval;
1460 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1461 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1462 (uint32_t)brp_list[brp_i].value);
1463 if (retval != ERROR_OK)
1464 return retval;
1465 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1466 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1467 (uint32_t)brp_list[brp_i].value);
1468 if (retval != ERROR_OK)
1469 return retval;
1470 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1471 LOG_DEBUG("Invalid BRP number in breakpoint");
1472 return ERROR_OK;
1474 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1475 brp_list[brp_j].control, brp_list[brp_j].value);
1476 brp_list[brp_j].used = 0;
1477 brp_list[brp_j].value = 0;
1478 brp_list[brp_j].control = 0;
1479 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1480 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1481 brp_list[brp_j].control);
1482 if (retval != ERROR_OK)
1483 return retval;
1484 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1485 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1486 (uint32_t)brp_list[brp_j].value);
1487 if (retval != ERROR_OK)
1488 return retval;
1489 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1490 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1491 (uint32_t)brp_list[brp_j].value);
1492 if (retval != ERROR_OK)
1493 return retval;
1495 breakpoint->linked_BRP = 0;
1496 breakpoint->set = 0;
1497 return ERROR_OK;
1499 } else {
1500 int brp_i = breakpoint->set - 1;
1501 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1502 LOG_DEBUG("Invalid BRP number in breakpoint");
1503 return ERROR_OK;
1505 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1506 brp_list[brp_i].control, brp_list[brp_i].value);
1507 brp_list[brp_i].used = 0;
1508 brp_list[brp_i].value = 0;
1509 brp_list[brp_i].control = 0;
1510 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1511 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1512 brp_list[brp_i].control);
1513 if (retval != ERROR_OK)
1514 return retval;
1515 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1516 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1517 brp_list[brp_i].value);
1518 if (retval != ERROR_OK)
1519 return retval;
1521 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1522 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1523 (uint32_t)brp_list[brp_i].value);
1524 if (retval != ERROR_OK)
1525 return retval;
1526 breakpoint->set = 0;
1527 return ERROR_OK;
1529 } else {
1530 /* restore original instruction (kept in target endianness) */
1532 armv8_cache_d_inner_flush_virt(armv8,
1533 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1534 breakpoint->length);
1536 if (breakpoint->length == 4) {
1537 retval = target_write_memory(target,
1538 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1539 4, 1, breakpoint->orig_instr);
1540 if (retval != ERROR_OK)
1541 return retval;
1542 } else {
1543 retval = target_write_memory(target,
1544 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1545 2, 1, breakpoint->orig_instr);
1546 if (retval != ERROR_OK)
1547 return retval;
1550 armv8_cache_d_inner_flush_virt(armv8,
1551 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1552 breakpoint->length);
1554 armv8_cache_i_inner_inval_virt(armv8,
1555 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1556 breakpoint->length);
1558 breakpoint->set = 0;
1560 return ERROR_OK;
1563 static int aarch64_add_breakpoint(struct target *target,
1564 struct breakpoint *breakpoint)
1566 struct aarch64_common *aarch64 = target_to_aarch64(target);
1568 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1569 LOG_INFO("no hardware breakpoint available");
1570 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1573 if (breakpoint->type == BKPT_HARD)
1574 aarch64->brp_num_available--;
1576 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1579 static int aarch64_add_context_breakpoint(struct target *target,
1580 struct breakpoint *breakpoint)
1582 struct aarch64_common *aarch64 = target_to_aarch64(target);
1584 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1585 LOG_INFO("no hardware breakpoint available");
1586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1589 if (breakpoint->type == BKPT_HARD)
1590 aarch64->brp_num_available--;
1592 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1595 static int aarch64_add_hybrid_breakpoint(struct target *target,
1596 struct breakpoint *breakpoint)
1598 struct aarch64_common *aarch64 = target_to_aarch64(target);
1600 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1601 LOG_INFO("no hardware breakpoint available");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1605 if (breakpoint->type == BKPT_HARD)
1606 aarch64->brp_num_available--;
1608 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1612 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1614 struct aarch64_common *aarch64 = target_to_aarch64(target);
1616 #if 0
1617 /* It is perfectly possible to remove breakpoints while the target is running */
1618 if (target->state != TARGET_HALTED) {
1619 LOG_WARNING("target not halted");
1620 return ERROR_TARGET_NOT_HALTED;
1622 #endif
1624 if (breakpoint->set) {
1625 aarch64_unset_breakpoint(target, breakpoint);
1626 if (breakpoint->type == BKPT_HARD)
1627 aarch64->brp_num_available++;
1630 return ERROR_OK;
 * AArch64 reset functions
1637 static int aarch64_assert_reset(struct target *target)
1639 struct armv8_common *armv8 = target_to_armv8(target);
1641 LOG_DEBUG(" ");
1643 /* FIXME when halt is requested, make it work somehow... */
1645 /* Issue some kind of warm reset. */
1646 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1647 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1648 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1649 /* REVISIT handle "pulls" cases, if there's
1650 * hardware that needs them to work.
1652 jtag_add_reset(0, 1);
1653 } else {
1654 LOG_ERROR("%s: how to reset?", target_name(target));
1655 return ERROR_FAIL;
1658 /* registers are now invalid */
1659 if (target_was_examined(target)) {
1660 register_cache_invalidate(armv8->arm.core_cache);
1661 register_cache_invalidate(armv8->arm.core_cache->next);
1664 target->state = TARGET_RESET;
1666 return ERROR_OK;
1669 static int aarch64_deassert_reset(struct target *target)
1671 int retval;
1673 LOG_DEBUG(" ");
1675 /* be certain SRST is off */
1676 jtag_add_reset(0, 0);
1678 if (!target_was_examined(target))
1679 return ERROR_OK;
1681 retval = aarch64_poll(target);
1682 if (retval != ERROR_OK)
1683 return retval;
1685 if (target->reset_halt) {
1686 if (target->state != TARGET_HALTED) {
1687 LOG_WARNING("%s: ran after reset and before halt ...",
1688 target_name(target));
1689 retval = target_halt(target);
1690 if (retval != ERROR_OK)
1691 return retval;
1695 return aarch64_init_debug_access(target);
1698 static int aarch64_write_cpu_memory_slow(struct target *target,
1699 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1701 struct armv8_common *armv8 = target_to_armv8(target);
1702 struct arm_dpm *dpm = &armv8->dpm;
1703 struct arm *arm = &armv8->arm;
1704 int retval;
1706 armv8_reg_current(arm, 1)->dirty = true;
1708 /* change DCC to normal mode if necessary */
1709 if (*dscr & DSCR_MA) {
1710 *dscr &= ~DSCR_MA;
1711 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1712 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1713 if (retval != ERROR_OK)
1714 return retval;
1717 while (count) {
1718 uint32_t data, opcode;
1720 /* write the data to store into DTRRX */
1721 if (size == 1)
1722 data = *buffer;
1723 else if (size == 2)
1724 data = target_buffer_get_u16(target, buffer);
1725 else
1726 data = target_buffer_get_u32(target, buffer);
1727 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1728 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1729 if (retval != ERROR_OK)
1730 return retval;
1732 if (arm->core_state == ARM_STATE_AARCH64)
1733 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1734 else
1735 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1736 if (retval != ERROR_OK)
1737 return retval;
1739 if (size == 1)
1740 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1741 else if (size == 2)
1742 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1743 else
1744 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1745 retval = dpm->instr_execute(dpm, opcode);
1746 if (retval != ERROR_OK)
1747 return retval;
1749 /* Advance */
1750 buffer += size;
1751 --count;
1754 return ERROR_OK;
/*
 * Bulk 32-bit write path: switch the DCC into memory-access mode so each
 * DTRRX write is consumed by the core, stream the whole buffer with a
 * non-incrementing AP write, then restore normal DCC mode.
 * Clobbers R1/X1 (marked dirty). Step numbers follow DDI0487A.g J9.1.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode transfer sequence */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
/*
 * Write target memory through the core via the APB-AP (DDI0487A.g J9.1):
 * load the start address into X0/R0, then dispatch to the fast (aligned
 * 32-bit, DCC memory mode) or slow (per-item) path. Sticky DSCR abort
 * flags are checked afterwards and converted into an error.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only for naturally aligned 32-bit accesses */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode - best effort; the original error is reported
		 * via the sticky abort check below */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
1877 static int aarch64_read_cpu_memory_slow(struct target *target,
1878 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1880 struct armv8_common *armv8 = target_to_armv8(target);
1881 struct arm_dpm *dpm = &armv8->dpm;
1882 struct arm *arm = &armv8->arm;
1883 int retval;
1885 armv8_reg_current(arm, 1)->dirty = true;
1887 /* change DCC to normal mode (if necessary) */
1888 if (*dscr & DSCR_MA) {
1889 *dscr &= DSCR_MA;
1890 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1892 if (retval != ERROR_OK)
1893 return retval;
1896 while (count) {
1897 uint32_t opcode, data;
1899 if (size == 1)
1900 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1901 else if (size == 2)
1902 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1903 else
1904 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1905 retval = dpm->instr_execute(dpm, opcode);
1906 if (retval != ERROR_OK)
1907 return retval;
1909 if (arm->core_state == ARM_STATE_AARCH64)
1910 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1911 else
1912 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1913 if (retval != ERROR_OK)
1914 return retval;
1916 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1917 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1918 if (retval != ERROR_OK)
1919 return retval;
1921 if (size == 1)
1922 *buffer = (uint8_t)data;
1923 else if (size == 2)
1924 target_buffer_set_u16(target, buffer, (uint16_t)data);
1925 else
1926 target_buffer_set_u32(target, buffer, data);
1928 /* Advance */
1929 buffer += size;
1930 --count;
1933 return ERROR_OK;
/*
 * Bulk 32-bit read path (DDI0487A.g J9.1): in DCC memory-access mode each
 * read of DTRTX causes the core to reissue the load and advance, so the
 * first value must be discarded and the final value collected after
 * leaving memory mode. Clobbers R1/X1 (marked dirty).
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the final word is collected separately after leaving memory mode */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
/*
 * Read target memory through the core via the APB-AP (DDI0487A.g J9.1):
 * load the start address into X0/R0, dispatch to the fast (aligned
 * 32-bit) or slow (per-item) path, make sure DCC memory mode is left
 * disabled, and convert sticky DSCR abort flags into an error.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only for naturally aligned 32-bit accesses */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* best-effort: make sure memory-access mode is off before the
	 * sticky-abort check below */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2096 static int aarch64_read_phys_memory(struct target *target,
2097 target_addr_t address, uint32_t size,
2098 uint32_t count, uint8_t *buffer)
2100 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2102 if (count && buffer) {
2103 /* read memory through APB-AP */
2104 retval = aarch64_mmu_modify(target, 0);
2105 if (retval != ERROR_OK)
2106 return retval;
2107 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2109 return retval;
2112 static int aarch64_read_memory(struct target *target, target_addr_t address,
2113 uint32_t size, uint32_t count, uint8_t *buffer)
2115 int mmu_enabled = 0;
2116 int retval;
2118 /* determine if MMU was enabled on target stop */
2119 retval = aarch64_mmu(target, &mmu_enabled);
2120 if (retval != ERROR_OK)
2121 return retval;
2123 if (mmu_enabled) {
2124 /* enable MMU as we could have disabled it for phys access */
2125 retval = aarch64_mmu_modify(target, 1);
2126 if (retval != ERROR_OK)
2127 return retval;
2129 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2132 static int aarch64_write_phys_memory(struct target *target,
2133 target_addr_t address, uint32_t size,
2134 uint32_t count, const uint8_t *buffer)
2136 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2138 if (count && buffer) {
2139 /* write memory through APB-AP */
2140 retval = aarch64_mmu_modify(target, 0);
2141 if (retval != ERROR_OK)
2142 return retval;
2143 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2146 return retval;
2149 static int aarch64_write_memory(struct target *target, target_addr_t address,
2150 uint32_t size, uint32_t count, const uint8_t *buffer)
2152 int mmu_enabled = 0;
2153 int retval;
2155 /* determine if MMU was enabled on target stop */
2156 retval = aarch64_mmu(target, &mmu_enabled);
2157 if (retval != ERROR_OK)
2158 return retval;
2160 if (mmu_enabled) {
2161 /* enable MMU as we could have disabled it for phys access */
2162 retval = aarch64_mmu_modify(target, 1);
2163 if (retval != ERROR_OK)
2164 return retval;
2166 return aarch64_write_cpu_memory(target, address, size, count, buffer);
/*
 * Timer callback: drain pending debug-channel words from a running core.
 *
 * While the target runs, the core pushes debug messages through DBGDTRTX;
 * DSCR's TXfull flag indicates a word is ready.  Each word is handed to the
 * generic target_request() dispatcher.  An AP access error simply ends the
 * drain loop for this tick; the callback always returns ERROR_OK so the
 * periodic timer stays armed.
 */
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	/* nothing to do before examine, or when debug messaging is disabled */
	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* re-read DSCR: more words may be queued */
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
2201 static int aarch64_examine_first(struct target *target)
2203 struct aarch64_common *aarch64 = target_to_aarch64(target);
2204 struct armv8_common *armv8 = &aarch64->armv8_common;
2205 struct adiv5_dap *swjdp = armv8->arm.dap;
2206 struct aarch64_private_config *pc;
2207 int i;
2208 int retval = ERROR_OK;
2209 uint64_t debug, ttypr;
2210 uint32_t cpuid;
2211 uint32_t tmp0, tmp1, tmp2, tmp3;
2212 debug = ttypr = cpuid = 0;
2214 /* Search for the APB-AB - it is needed for access to debug registers */
2215 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2216 if (retval != ERROR_OK) {
2217 LOG_ERROR("Could not find APB-AP for debug access");
2218 return retval;
2221 retval = mem_ap_init(armv8->debug_ap);
2222 if (retval != ERROR_OK) {
2223 LOG_ERROR("Could not initialize the APB-AP");
2224 return retval;
2227 armv8->debug_ap->memaccess_tck = 10;
2229 if (!target->dbgbase_set) {
2230 uint32_t dbgbase;
2231 /* Get ROM Table base */
2232 uint32_t apid;
2233 int32_t coreidx = target->coreid;
2234 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2235 if (retval != ERROR_OK)
2236 return retval;
2237 /* Lookup 0x15 -- Processor DAP */
2238 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2239 &armv8->debug_base, &coreidx);
2240 if (retval != ERROR_OK)
2241 return retval;
2242 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2243 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2244 } else
2245 armv8->debug_base = target->dbgbase;
2247 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2248 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2249 if (retval != ERROR_OK) {
2250 LOG_DEBUG("Examine %s failed", "oslock");
2251 return retval;
2254 retval = mem_ap_read_u32(armv8->debug_ap,
2255 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2256 if (retval != ERROR_OK) {
2257 LOG_DEBUG("Examine %s failed", "CPUID");
2258 return retval;
2261 retval = mem_ap_read_u32(armv8->debug_ap,
2262 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2263 retval += mem_ap_read_u32(armv8->debug_ap,
2264 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2265 if (retval != ERROR_OK) {
2266 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2267 return retval;
2269 retval = mem_ap_read_u32(armv8->debug_ap,
2270 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2271 retval += mem_ap_read_u32(armv8->debug_ap,
2272 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2273 if (retval != ERROR_OK) {
2274 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2275 return retval;
2278 retval = dap_run(armv8->debug_ap->dap);
2279 if (retval != ERROR_OK) {
2280 LOG_ERROR("%s: examination failed\n", target_name(target));
2281 return retval;
2284 ttypr |= tmp1;
2285 ttypr = (ttypr << 32) | tmp0;
2286 debug |= tmp3;
2287 debug = (debug << 32) | tmp2;
2289 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2290 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2291 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2293 if (target->private_config == NULL)
2294 return ERROR_FAIL;
2296 pc = (struct aarch64_private_config *)target->private_config;
2297 if (pc->cti == NULL)
2298 return ERROR_FAIL;
2300 armv8->cti = pc->cti;
2302 retval = aarch64_dpm_setup(aarch64, debug);
2303 if (retval != ERROR_OK)
2304 return retval;
2306 /* Setup Breakpoint Register Pairs */
2307 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2308 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2309 aarch64->brp_num_available = aarch64->brp_num;
2310 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2311 for (i = 0; i < aarch64->brp_num; i++) {
2312 aarch64->brp_list[i].used = 0;
2313 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2314 aarch64->brp_list[i].type = BRP_NORMAL;
2315 else
2316 aarch64->brp_list[i].type = BRP_CONTEXT;
2317 aarch64->brp_list[i].value = 0;
2318 aarch64->brp_list[i].control = 0;
2319 aarch64->brp_list[i].BRPn = i;
2322 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2324 target->state = TARGET_UNKNOWN;
2325 target->debug_reason = DBG_REASON_NOTHALTED;
2326 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2327 target_set_examined(target);
2328 return ERROR_OK;
2331 static int aarch64_examine(struct target *target)
2333 int retval = ERROR_OK;
2335 /* don't re-probe hardware after each reset */
2336 if (!target_was_examined(target))
2337 retval = aarch64_examine_first(target);
2339 /* Configure core debug access */
2340 if (retval == ERROR_OK)
2341 retval = aarch64_init_debug_access(target);
2343 return retval;
/*
 * Aarch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
/*
 * Initialize the aarch64_common structure: set the magic, attach the DAP,
 * install the arch-specific hooks and register the periodic debug-message
 * poll callback.  Always returns ERROR_OK.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll for debug-channel messages once per ms while running */
	target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2378 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2380 struct aarch64_private_config *pc = target->private_config;
2381 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2383 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2384 return ERROR_FAIL;
2386 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2389 static void aarch64_deinit_target(struct target *target)
2391 struct aarch64_common *aarch64 = target_to_aarch64(target);
2392 struct armv8_common *armv8 = &aarch64->armv8_common;
2393 struct arm_dpm *dpm = &armv8->dpm;
2395 armv8_free_reg_cache(target);
2396 free(aarch64->brp_list);
2397 free(dpm->dbp);
2398 free(dpm->dwp);
2399 free(target->private_config);
2400 free(aarch64);
2403 static int aarch64_mmu(struct target *target, int *enabled)
2405 if (target->state != TARGET_HALTED) {
2406 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2407 return ERROR_TARGET_INVALID;
2410 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2411 return ERROR_OK;
/* Translate a virtual address to a physical one via the ARMv8 MMU walker.
 * NOTE(review): the final argument appears to select the translation
 * variant — confirm against armv8_mmu_translate_va_pa(). */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
/*
 * private target configuration items
 */
/* configuration keys accepted by "aarch64_jim_configure" beyond the
 * common ADIv5 options; currently only the CTI instance binding */
enum aarch64_cfg_param {
	CFG_CTI,
};

/* name/value table mapping the "-cti" option to CFG_CTI; the NULL entry
 * terminates the lookup */
static const Jim_Nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
/*
 * Parse target "configure"/"cget" options.
 *
 * First delegates to adiv5_jim_configure() for the common DAP options, then
 * handles the aarch64-specific "-cti" option: on configure it resolves and
 * stores the named CTI instance; on cget it returns the configured CTI's
 * name.  Returns JIM_OK / JIM_ERR, or JIM_CONTINUE when the topmost option
 * is not ours so the caller can try other parsers.
 */
static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
{
	struct aarch64_private_config *pc;
	Jim_Nvp *n;
	int e;

	/* allocate the private config lazily on first option seen
	 * NOTE(review): calloc() result is not checked here — a failed
	 * allocation would be dereferenced below; consider adding a check */
	pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occured during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 */
	e = adiv5_jim_configure(target, goi);
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name we just matched */
		e = Jim_GetOpt_Obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "-cti <name>": resolve and bind the CTI instance */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = Jim_GetOpt_Obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* cget: takes no argument, returns the CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (pc == NULL || pc->cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2508 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2510 struct target *target = get_current_target(CMD_CTX);
2511 struct armv8_common *armv8 = target_to_armv8(target);
2513 return armv8_handle_cache_info_command(CMD_CTX,
2514 &armv8->armv8_mmu.armv8_cache);
2518 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2520 struct target *target = get_current_target(CMD_CTX);
2521 if (!target_was_examined(target)) {
2522 LOG_ERROR("target not examined yet");
2523 return ERROR_FAIL;
2526 return aarch64_init_debug_access(target);
2528 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2530 struct target *target = get_current_target(CMD_CTX);
2531 /* check target is an smp target */
2532 struct target_list *head;
2533 struct target *curr;
2534 head = target->head;
2535 target->smp = 0;
2536 if (head != (struct target_list *)NULL) {
2537 while (head != (struct target_list *)NULL) {
2538 curr = head->target;
2539 curr->smp = 0;
2540 head = head->next;
2542 /* fixes the target display to the debugger */
2543 target->gdb_service->target = target;
2545 return ERROR_OK;
2548 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2550 struct target *target = get_current_target(CMD_CTX);
2551 struct target_list *head;
2552 struct target *curr;
2553 head = target->head;
2554 if (head != (struct target_list *)NULL) {
2555 target->smp = 1;
2556 while (head != (struct target_list *)NULL) {
2557 curr = head->target;
2558 curr->smp = 1;
2559 head = head->next;
2562 return ERROR_OK;
/*
 * "aarch64 maskisr ['on'|'off']": set or query whether interrupts are
 * masked while single-stepping.  With an argument the mode is updated;
 * in all cases the current mode is printed.
 */
COMMAND_HANDLER(aarch64_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	/* name<->value table for the two supported modes */
	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = AARCH64_ISRMASK_OFF },
		{ .name = "on", .value = AARCH64_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL) {
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		aarch64->isrmasking_mode = n->value;
	}

	/* echo the (possibly updated) mode back to the user */
	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
	command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);

	return ERROR_OK;
}
2593 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2595 struct command_context *context;
2596 struct target *target;
2597 struct arm *arm;
2598 int retval;
2599 bool is_mcr = false;
2600 int arg_cnt = 0;
2602 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2603 is_mcr = true;
2604 arg_cnt = 7;
2605 } else {
2606 arg_cnt = 6;
2609 context = current_command_context(interp);
2610 assert(context != NULL);
2612 target = get_current_target(context);
2613 if (target == NULL) {
2614 LOG_ERROR("%s: no current target", __func__);
2615 return JIM_ERR;
2617 if (!target_was_examined(target)) {
2618 LOG_ERROR("%s: not yet examined", target_name(target));
2619 return JIM_ERR;
2622 arm = target_to_arm(target);
2623 if (!is_arm(arm)) {
2624 LOG_ERROR("%s: not an ARM", target_name(target));
2625 return JIM_ERR;
2628 if (target->state != TARGET_HALTED)
2629 return ERROR_TARGET_NOT_HALTED;
2631 if (arm->core_state == ARM_STATE_AARCH64) {
2632 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2633 return JIM_ERR;
2636 if (argc != arg_cnt) {
2637 LOG_ERROR("%s: wrong number of arguments", __func__);
2638 return JIM_ERR;
2641 int cpnum;
2642 uint32_t op1;
2643 uint32_t op2;
2644 uint32_t CRn;
2645 uint32_t CRm;
2646 uint32_t value;
2647 long l;
2649 /* NOTE: parameter sequence matches ARM instruction set usage:
2650 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2651 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2652 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2654 retval = Jim_GetLong(interp, argv[1], &l);
2655 if (retval != JIM_OK)
2656 return retval;
2657 if (l & ~0xf) {
2658 LOG_ERROR("%s: %s %d out of range", __func__,
2659 "coprocessor", (int) l);
2660 return JIM_ERR;
2662 cpnum = l;
2664 retval = Jim_GetLong(interp, argv[2], &l);
2665 if (retval != JIM_OK)
2666 return retval;
2667 if (l & ~0x7) {
2668 LOG_ERROR("%s: %s %d out of range", __func__,
2669 "op1", (int) l);
2670 return JIM_ERR;
2672 op1 = l;
2674 retval = Jim_GetLong(interp, argv[3], &l);
2675 if (retval != JIM_OK)
2676 return retval;
2677 if (l & ~0xf) {
2678 LOG_ERROR("%s: %s %d out of range", __func__,
2679 "CRn", (int) l);
2680 return JIM_ERR;
2682 CRn = l;
2684 retval = Jim_GetLong(interp, argv[4], &l);
2685 if (retval != JIM_OK)
2686 return retval;
2687 if (l & ~0xf) {
2688 LOG_ERROR("%s: %s %d out of range", __func__,
2689 "CRm", (int) l);
2690 return JIM_ERR;
2692 CRm = l;
2694 retval = Jim_GetLong(interp, argv[5], &l);
2695 if (retval != JIM_OK)
2696 return retval;
2697 if (l & ~0x7) {
2698 LOG_ERROR("%s: %s %d out of range", __func__,
2699 "op2", (int) l);
2700 return JIM_ERR;
2702 op2 = l;
2704 value = 0;
2706 if (is_mcr == true) {
2707 retval = Jim_GetLong(interp, argv[6], &l);
2708 if (retval != JIM_OK)
2709 return retval;
2710 value = l;
2712 /* NOTE: parameters reordered! */
2713 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2714 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2715 if (retval != ERROR_OK)
2716 return JIM_ERR;
2717 } else {
2718 /* NOTE: parameters reordered! */
2719 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2720 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2721 if (retval != ERROR_OK)
2722 return JIM_ERR;
2724 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2727 return JIM_OK;
/* subcommands registered under the "aarch64" command group */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	/* mcr/mrc share one Jim handler; it dispatches on the command name */
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},

	COMMAND_REGISTRATION_DONE
};
/* top-level command registration: inherit the generic armv8 commands and
 * add the "aarch64" group defined above */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2798 struct target_type aarch64_target = {
2799 .name = "aarch64",
2801 .poll = aarch64_poll,
2802 .arch_state = armv8_arch_state,
2804 .halt = aarch64_halt,
2805 .resume = aarch64_resume,
2806 .step = aarch64_step,
2808 .assert_reset = aarch64_assert_reset,
2809 .deassert_reset = aarch64_deassert_reset,
2811 /* REVISIT allow exporting VFP3 registers ... */
2812 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2814 .read_memory = aarch64_read_memory,
2815 .write_memory = aarch64_write_memory,
2817 .add_breakpoint = aarch64_add_breakpoint,
2818 .add_context_breakpoint = aarch64_add_context_breakpoint,
2819 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2820 .remove_breakpoint = aarch64_remove_breakpoint,
2821 .add_watchpoint = NULL,
2822 .remove_watchpoint = NULL,
2824 .commands = aarch64_command_handlers,
2825 .target_create = aarch64_target_create,
2826 .target_jim_configure = aarch64_jim_configure,
2827 .init_target = aarch64_init_target,
2828 .deinit_target = aarch64_deinit_target,
2829 .examine = aarch64_examine,
2831 .read_phys_memory = aarch64_read_phys_memory,
2832 .write_phys_memory = aarch64_write_phys_memory,
2833 .mmu = aarch64_mmu,
2834 .virt2phys = aarch64_virt2phys,