/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *                                                                         *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include <helper/time_support.h>

enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

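/* Walk the list of targets in an SMP group, starting at 'head'.
 * 'pos' iterates over the struct target_list nodes; the target itself
 * is reached via pos->target. */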
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)

static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot write system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}

/* Modify system_control_reg in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at a physical or virtual address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu was enabled when the target stopped but is not enabled now */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;
	default:
		LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_state);
		break;
	}

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);
	return retval;
}

/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

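/* Read-modify-write helper: set the bits selected by 'bit_mask' in
 * DSCR to 'value', leaving all other bits untouched. */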
static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}

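/* Sample PRSR once and report, via '*p_result', whether the bits
 * selected by 'mask' match 'val'. The raw PRSR value is optionally
 * returned through 'p_prsr'. Both output pointers may be NULL. */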
static int aarch64_check_state_one(struct target *target,
	uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t prsr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
	if (retval != ERROR_OK)
		return retval;

	if (p_prsr)
		*p_prsr = prsr;

	if (p_result)
		*p_result = (prsr & mask) == (val & mask);

	return ERROR_OK;
}

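/* Spin until PRSR indicates the PE entered debug state, or give up
 * after a one second timeout. */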
static int aarch64_wait_halt_one(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t prsr;

	int64_t then = timeval_ms();
	for (;;) {
		int halted;

		retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
		if (retval != ERROR_OK || halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			LOG_DEBUG("target %s timeout, prsr=0x%08" PRIx32, target_name(target), prsr);
			break;
		}
	}

	return retval;
}

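/* Prepare all running PEs of the SMP group for halting: enable halting
 * debug mode and open the CTI gate for channel 0, so that a single halt
 * event propagates through the CTM to the whole group. When 'exc_target'
 * is true the calling target itself is skipped; '*p_first' receives the
 * first target that was actually prepared. */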
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}

static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}

static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

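/* Bring the remaining targets of an SMP group up to date after a halt:
 * halt stragglers if needed, poll every other core so its state is
 * refreshed, and poll the core serving GDB last. */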
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_INFO("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}

/*
 * Aarch64 Run control
 */

static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	if (target->smp)
		return aarch64_halt_smp(target, false);

	return aarch64_halt_one(target, HALT_SYNC);
}

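/* Restore the processor context before leaving debug state. With
 * 'current' non-zero, execution continues at the current PC, which is
 * returned through '*address'; otherwise execution continues at
 * '*address'. */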
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}

	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* call it now before restoring context because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

/*
 * prepare single target for restart
 *
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* clear sticky bits in PRSR, SDR is now 0 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &tmp);

	return retval;
}

static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	LOG_DEBUG("%s", target_name(target));

	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 1);
	if (retval != ERROR_OK)
		return retval;

	if (mode == RESTART_SYNC) {
		int64_t then = timeval_ms();
		for (;;) {
			int resumed;
			/*
			 * if PRSR.SDR is set now, the target did restart, even
			 * if it's now already halted again (e.g. due to breakpoint)
			 */
			retval = aarch64_check_state_one(target,
						PRSR_SDR, PRSR_SDR, &resumed, NULL);
			if (retval != ERROR_OK || resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	return ERROR_OK;
}

static int aarch64_restart_one(struct target *target, enum restart_mode mode)
{
	int retval;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prepare_restart_one(target);
	if (retval == ERROR_OK)
		retval = aarch64_do_restart_one(target, mode);

	return retval;
}

/*
 * prepare all but the current target for restart
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}

static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
			debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

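/* Called once on entry to debug state: clear sticky errors, acknowledge
 * the CTI halt event, pick the AArch64/AArch32 opcode and register access
 * schemes based on the core state, determine the debug reason and read
 * the current register set. */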
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}

/*
 * single-step a target
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
	if (retval == ERROR_OK)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && !handle_breakpoints) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = retval;

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
	if (retval != ERROR_OK)
		return retval;

	if (saved_retval != ERROR_OK)
		return saved_retval;

	return aarch64_poll(target);
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	int retval;

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
	if (retval == ERROR_OK) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		register_cache_invalidate(arm->core_cache->next);
	}

	return retval;
}

/*
 * Cortex-A8 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

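/* Setup a context-matching breakpoint: the comparison is made against
 * the ASID stored in breakpoint->asid instead of an instruction
 * address. */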
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_list[brp_i].used ||
			(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

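/* Setup a hybrid breakpoint: a context-matching BRP and an
 * address-matching BRP are linked together, so the breakpoint only
 * fires when both the instruction address and the ASID match. */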
static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_list[brp_1].used ||
			(brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_list[brp_2].used ||
			(brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0;
	}

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * Cortex-A8 Reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return aarch64_init_debug_access(target);
}

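/* Write target memory over the APB-AP, using the DCC in memory-access
 * mode (DSCR.MA): X0 is loaded with the word-aligned start address,
 * then every word pushed through DBGDTRRX is stored by the PE and the
 * address auto-increments. Unaligned head/tail bytes are handled by
 * read-modify-write of the enclosing words. */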
static int aarch64_write_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);

	/* Mark registers X0 and X1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* The algorithm only copies 32 bit words, so the buffer
	 * should be expanded to include the words at either end.
	 * The first and last words will be read first to avoid
	 * corruption if needed.
	 */
	tmp_buff = malloc(total_u32 * 4);

	if ((start_byte != 0) && (total_u32 > 1)) {
		/* First bytes not aligned - read the 32 bit word to avoid corrupting
		 * the other bytes in the word.
		 */
		retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
		if (retval != ERROR_OK)
			goto error_free_buff_w;
	}

	/* If end of write is not aligned, or the write is less than 4 bytes */
	if ((end_byte != 0) ||
		((total_u32 == 1) && (total_bytes != 4))) {

		/* Read the last word to avoid corruption during 32 bit write */
		int mem_offset = (total_u32-1) * 4;
		retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
		if (retval != ERROR_OK)
			goto error_free_buff_w;
	}

	/* Copy the write buffer over the top of the temporary buffer */
	memcpy(&tmp_buff[start_byte], buffer, total_bytes);

	/* We now have a 32 bit aligned buffer that can be written */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_w;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
	}

	/* Step 1.d - Change DCC to memory mode */
	dscr = dscr | DSCR_MA;
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_w;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_w;
	}

	/* Done */
	free(tmp_buff);
	return ERROR_OK;

error_unset_dtr_w:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_w:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}

1817 static int aarch64_read_apb_ap_memory(struct target *target,
1818 target_addr_t address, uint32_t size,
1819 uint32_t count, uint8_t *buffer)
1821 /* read memory through APB-AP */
1822 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1823 struct armv8_common *armv8 = target_to_armv8(target);
1824 struct arm_dpm *dpm = &armv8->dpm;
1825 struct arm *arm = &armv8->arm;
1826 int total_bytes = count * size;
1827 int total_u32;
1828 int start_byte = address & 0x3;
1829 int end_byte = (address + total_bytes) & 0x3;
1830 struct reg *reg;
1831 uint32_t dscr;
1832 uint8_t *tmp_buff = NULL;
1833 uint8_t *u8buf_ptr;
1834 uint32_t value;
1836 if (target->state != TARGET_HALTED) {
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
1841 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1842 /* Mark register X0, X1 as dirty, as it will be used
1843 * for transferring the data.
1844 * It will be restored automatically when exiting
1845 * debug mode
1847 reg = armv8_reg_current(arm, 1);
1848 reg->dirty = true;
1850 reg = armv8_reg_current(arm, 0);
1851 reg->dirty = true;
1853 /* Read DSCR */
1854 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1857 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1859 /* Set Normal access mode */
1860 dscr = (dscr & ~DSCR_MA);
1861 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1864 if (arm->core_state == ARM_STATE_AARCH64) {
1865 /* Write X0 with value 'address' using write procedure */
1866 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1867 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1868 retval += dpm->instr_write_data_dcc_64(dpm,
1869 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1870 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1871 retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1872 /* Step 1.e - Change DCC to memory mode */
1873 dscr = dscr | DSCR_MA;
1874 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1875 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1876 /* Step 1.f - read DBGDTRTX and discard the value */
1877 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1878 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1879 } else {
1880 /* Write R0 with value 'address' using write procedure */
1881 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1882 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1883 retval += dpm->instr_write_data_dcc(dpm,
1884 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1885 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1886 retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1887 /* Step 1.e - Change DCC to memory mode */
1888 dscr = dscr | DSCR_MA;
1889 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1890 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1891 /* Step 1.f - read DBGDTRTX and discard the value */
1892 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1893 armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;
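
	/* Worked example (illustrative): a 6-byte read from an address ending
	 * in ...1 gives start_byte = 1 and total_u32 = DIV_ROUND_UP(1 + 6, 4)
	 * = 2, so two whole words land in tmp_buff and the caller's 6 bytes
	 * are copied out starting at offset 1. */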

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * The data is read in 32-bit aligned words.
	 */

	/* Step 2.a   - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
			armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a   - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b   - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("failed to read memory through APB-AP");
	free(tmp_buff);
	return ERROR_FAIL;
}
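
/*
 * A minimal sketch of driving the read path above (hypothetical caller;
 * the target pointer, address and buffer are assumptions for illustration,
 * not part of this file):
 *
 *	uint8_t buf[16];
 *	int retval = aarch64_read_apb_ap_memory(target, 0x80000000ULL,
 *			4, sizeof(buf) / 4, buf);
 *	if (retval != ERROR_OK)
 *		LOG_ERROR("APB-AP read failed: %d", retval);
 *
 * Word-aligned requests stream straight into the caller's buffer; anything
 * unaligned goes through tmp_buff and is shifted on copy-out.
 */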

static int aarch64_read_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	if (count && buffer) {
		/* read memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
	}
	return retval;
}

static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
}

static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	if (count && buffer) {
		/* write memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
	}

	return retval;
}

static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
}

static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
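
/*
 * Note on the loop above: DSCR_DTR_TX_FULL means the core has pushed a word
 * into its DCC transmit register; draining DBGDTRTX here is what delivers
 * target-side debug messages while the core keeps running. The callback is
 * registered once per target in aarch64_init_arch_info() below.
 */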

static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint32_t cti_base;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AP - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 10;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	uint32_t prsr;
	int64_t then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
		if (retval == ERROR_OK) {
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_PRCR, PRCR_COREPURQ|PRCR_CORENPDRQ);
			if (retval != ERROR_OK) {
				LOG_DEBUG("write to PRCR failed");
				break;
			}
		}

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	} while ((prsr & PRSR_PU) == 0);

	if (retval != ERROR_OK) {
		LOG_ERROR("target %s: failed to set power state of the core.", target_name(target));
		return retval;
	}
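
	/* PRCR.COREPURQ and PRCR.CORENPDRQ request that the core's power domain
	 * be powered up and kept from powering down; PRSR.PU reads back the
	 * power status, so the loop above spins until power-up is confirmed or
	 * one second elapses. */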

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		cti_base = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%08" PRIx32, cti_base);
	} else
		cti_base = target->ctibase;

	armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
	if (armv8->cti == NULL)
		return ERROR_FAIL;

	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target->state = TARGET_RUNNING;
	target->debug_reason = DBG_REASON_NOTHALTED;

	target_set_examined(target);
	return ERROR_OK;
}
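
/*
 * The breakpoint counts decoded above come from ID_AA64DFR0_EL1: bits
 * [15:12] (BRPs) hold the number of breakpoint register pairs minus one,
 * and bits [31:28] (CTX_CMPs) the number of those supporting context
 * matching, also minus one - hence the '+ 1' in both decodes.
 */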

static int aarch64_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = aarch64_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = aarch64_init_debug_access(target);

	return retval;
}

/*
 * aarch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}

static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct jtag_tap *tap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	/* tap has no dap initialized */
	if (!tap->dap) {
		tap->dap = dap_init();
		tap->dap->tap = tap;
	}
	armv8->arm.dap = tap->dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);

	return ERROR_OK;
}

static int aarch64_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));

	if (!aarch64)
		return ERROR_FAIL;

	return aarch64_init_arch_info(target, aarch64, target->tap);
}

static int aarch64_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
	return ERROR_OK;
}

static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}

COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD_CTX,
			&armv8->armv8_mmu.armv8_cache);
}

COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}

COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check target is an smp target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != (struct target_list *)NULL) {
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}
	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		target->smp = 1;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}

static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};

static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
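
/*
 * The registrations above surface as sub-commands of the "aarch64" group,
 * e.g. from the OpenOCD console:
 *
 *	aarch64 cache_info
 *	aarch64 dbginit
 *	aarch64 smp_on
 *	aarch64 smp_off
 */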

struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};