/* OpenOCD: src/target/nds32_v3_common.c — common support for Andes nds32 v3 cores */
/* SPDX-License-Identifier: GPL-2.0-or-later */

/***************************************************************************
 *   Copyright (C) 2013 Andes Technology                                   *
 *   Hsiangkai Wang <hkwang@andestech.com>                                 *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "nds32_reg.h"
#include "nds32_disassembler.h"
#include "nds32.h"
#include "nds32_aice.h"
#include "nds32_v3_common.h"
/* Variant-specific (v3/v3m) hooks registered via
 * nds32_v3_common_register_callback(); must be set before any of the
 * debug-entry/leave paths below run. */
static struct nds32_v3_common_callback *v3_common_callback;
21 static int nds32_v3_register_mapping(struct nds32 *nds32, int reg_no)
23 if (reg_no == PC)
24 return IR11;
26 return reg_no;
29 static int nds32_v3_get_debug_reason(struct nds32 *nds32, uint32_t *reason)
31 uint32_t edmsw;
32 struct aice_port_s *aice = target_to_aice(nds32->target);
33 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
35 *reason = (edmsw >> 12) & 0x0F;
37 return ERROR_OK;
40 /**
41 * Save processor state. This is called after a HALT instruction
42 * succeeds, and on other occasions the processor enters debug mode
43 * (breakpoint, watchpoint, etc).
45 static int nds32_v3_debug_entry(struct nds32 *nds32, bool enable_watchpoint)
47 LOG_DEBUG("nds32_v3_debug_entry");
49 enum target_state backup_state = nds32->target->state;
50 nds32->target->state = TARGET_HALTED;
52 if (nds32->init_arch_info_after_halted == false) {
53 /* init architecture info according to config registers */
54 CHECK_RETVAL(nds32_config(nds32));
56 nds32->init_arch_info_after_halted = true;
59 /* REVISIT entire cache should already be invalid !!! */
60 register_cache_invalidate(nds32->core_cache);
62 /* deactivate all hardware breakpoints */
63 CHECK_RETVAL(v3_common_callback->deactivate_hardware_breakpoint(nds32->target));
65 if (enable_watchpoint)
66 CHECK_RETVAL(v3_common_callback->deactivate_hardware_watchpoint(nds32->target));
68 struct breakpoint *syscall_break = &(nds32->syscall_break);
69 if (nds32->virtual_hosting) {
70 if (syscall_break->is_set) {
71 /** disable virtual hosting */
73 /* remove breakpoint at syscall entry */
74 target_remove_breakpoint(nds32->target, syscall_break);
75 syscall_break->is_set = false;
77 uint32_t value_pc;
78 nds32_get_mapped_reg(nds32, PC, &value_pc);
79 if (value_pc == syscall_break->address)
80 /** process syscall for virtual hosting */
81 nds32->hit_syscall = true;
85 if (nds32_examine_debug_reason(nds32) != ERROR_OK) {
86 nds32->target->state = backup_state;
88 /* re-activate all hardware breakpoints & watchpoints */
89 CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(nds32->target));
91 if (enable_watchpoint)
92 CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(nds32->target));
94 return ERROR_FAIL;
97 /* Save registers. */
98 nds32_full_context(nds32);
100 /* check interrupt level */
101 v3_common_callback->check_interrupt_stack(nds32);
103 return ERROR_OK;
107 * Restore processor state.
109 static int nds32_v3_leave_debug_state(struct nds32 *nds32, bool enable_watchpoint)
111 LOG_DEBUG("nds32_v3_leave_debug_state");
113 struct target *target = nds32->target;
115 /* activate all hardware breakpoints */
116 CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(target));
118 if (enable_watchpoint) {
119 /* activate all watchpoints */
120 CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(target));
123 /* restore interrupt stack */
124 v3_common_callback->restore_interrupt_stack(nds32);
126 /* REVISIT once we start caring about MMU and cache state,
127 * address it here ...
130 /* restore PSW, PC, and R0 ... after flushing any modified
131 * registers.
133 CHECK_RETVAL(nds32_restore_context(target));
135 if (nds32->virtual_hosting) {
136 /** enable virtual hosting */
137 uint32_t value_ir3;
138 uint32_t entry_size;
139 uint32_t syscall_address;
141 /* get syscall entry address */
142 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
143 entry_size = 0x4 << (((value_ir3 >> 14) & 0x3) << 1);
144 syscall_address = (value_ir3 & 0xFFFF0000) + entry_size * 8; /* The index of SYSCALL is 8 */
146 if (nds32->hit_syscall) {
147 /* single step to skip syscall entry */
148 /* use IRET to skip syscall */
149 struct aice_port_s *aice = target_to_aice(target);
150 uint32_t value_ir9;
151 uint32_t value_ir6;
152 uint32_t syscall_id;
154 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
155 syscall_id = (value_ir6 >> 16) & 0x7FFF;
157 if (syscall_id == NDS32_SYSCALL_EXIT) {
158 /* If target hits exit syscall, do not use IRET to skip handler. */
159 aice_step(aice);
160 } else {
161 /* use api->read/write_reg to skip nds32 register cache */
162 uint32_t value_dimbr;
163 aice_read_debug_reg(aice, NDS_EDM_SR_DIMBR, &value_dimbr);
164 aice_write_register(aice, IR11, value_dimbr + 0xC);
166 aice_read_register(aice, IR9, &value_ir9);
167 value_ir9 += 4; /* syscall is always 4 bytes */
168 aice_write_register(aice, IR9, value_ir9);
170 /* backup hardware breakpoint 0 */
171 uint32_t backup_bpa, backup_bpam, backup_bpc;
172 aice_read_debug_reg(aice, NDS_EDM_SR_BPA0, &backup_bpa);
173 aice_read_debug_reg(aice, NDS_EDM_SR_BPAM0, &backup_bpam);
174 aice_read_debug_reg(aice, NDS_EDM_SR_BPC0, &backup_bpc);
176 /* use hardware breakpoint 0 to stop cpu after skipping syscall */
177 aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, value_ir9);
178 aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, 0);
179 aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, 0xA);
181 /* Execute two IRET.
182 * First IRET is used to quit debug mode.
183 * Second IRET is used to quit current syscall. */
184 uint32_t dim_inst[4] = {NOP, NOP, IRET, IRET};
185 aice_execute(aice, dim_inst, 4);
187 /* restore origin hardware breakpoint 0 */
188 aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, backup_bpa);
189 aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, backup_bpam);
190 aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, backup_bpc);
193 nds32->hit_syscall = false;
196 /* insert breakpoint at syscall entry */
197 struct breakpoint *syscall_break = &(nds32->syscall_break);
199 syscall_break->address = syscall_address;
200 syscall_break->type = BKPT_SOFT;
201 syscall_break->is_set = true;
202 target_add_breakpoint(target, syscall_break);
205 return ERROR_OK;
208 static int nds32_v3_get_exception_address(struct nds32 *nds32,
209 uint32_t *address, uint32_t reason)
211 LOG_DEBUG("nds32_v3_get_exception_address");
213 struct aice_port_s *aice = target_to_aice(nds32->target);
214 struct target *target = nds32->target;
215 uint32_t edmsw;
216 uint32_t edm_cfg;
217 uint32_t match_bits;
218 uint32_t match_count;
219 int32_t i;
220 static int32_t number_of_hard_break;
221 uint32_t bp_control;
223 if (number_of_hard_break == 0) {
224 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
225 number_of_hard_break = (edm_cfg & 0x7) + 1;
228 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
229 /* clear matching bits (write-one-clear) */
230 aice_write_debug_reg(aice, NDS_EDM_SR_EDMSW, edmsw);
231 match_bits = (edmsw >> 4) & 0xFF;
232 match_count = 0;
233 for (i = 0 ; i < number_of_hard_break ; i++) {
234 if (match_bits & (1 << i)) {
235 aice_read_debug_reg(aice, NDS_EDM_SR_BPA0 + i, address);
236 match_count++;
238 /* If target hits multiple read/access watchpoint,
239 * select the first one. */
240 aice_read_debug_reg(aice, NDS_EDM_SR_BPC0 + i, &bp_control);
241 if (0x3 == (bp_control & 0x3)) {
242 match_count = 1;
243 break;
248 if (match_count > 1) { /* multiple hits */
249 *address = 0;
250 return ERROR_OK;
251 } else if (match_count == 1) {
252 uint32_t val_pc;
253 uint32_t opcode;
254 struct nds32_instruction instruction;
255 struct watchpoint *wp;
256 bool hit;
258 nds32_get_mapped_reg(nds32, PC, &val_pc);
260 if ((reason == NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE) ||
261 (reason == NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE)) {
262 if (edmsw & 0x4) /* check EDMSW.IS_16BIT */
263 val_pc -= 2;
264 else
265 val_pc -= 4;
268 nds32_read_opcode(nds32, val_pc, &opcode);
269 nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);
271 LOG_DEBUG("PC: 0x%08" PRIx32 ", access start: 0x%08" PRIx32 ", end: 0x%08" PRIx32,
272 val_pc, instruction.access_start, instruction.access_end);
274 /* check if multiple hits in the access range */
275 uint32_t in_range_watch_count = 0;
276 for (wp = target->watchpoints; wp; wp = wp->next) {
277 if ((instruction.access_start <= wp->address) &&
278 (wp->address < instruction.access_end))
279 in_range_watch_count++;
281 if (in_range_watch_count > 1) {
282 /* Hit LSMW instruction. */
283 *address = 0;
284 return ERROR_OK;
287 /* dispel false match */
288 hit = false;
289 for (wp = target->watchpoints; wp; wp = wp->next) {
290 if (((*address ^ wp->address) & (~wp->mask)) == 0) {
291 uint32_t watch_start;
292 uint32_t watch_end;
294 watch_start = wp->address;
295 watch_end = wp->address + wp->length;
297 if ((watch_end <= instruction.access_start) ||
298 (instruction.access_end <= watch_start))
299 continue;
301 hit = true;
302 break;
306 if (hit)
307 return ERROR_OK;
308 else
309 return ERROR_FAIL;
310 } else if (match_count == 0) {
311 /* global stop is precise exception */
312 if ((reason == NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP) && nds32->global_stop) {
313 /* parse instruction to get correct access address */
314 uint32_t val_pc;
315 uint32_t opcode;
316 struct nds32_instruction instruction;
318 nds32_get_mapped_reg(nds32, PC, &val_pc);
319 nds32_read_opcode(nds32, val_pc, &opcode);
320 nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);
322 *address = instruction.access_start;
324 return ERROR_OK;
328 *address = 0xFFFFFFFF;
329 return ERROR_FAIL;
332 void nds32_v3_common_register_callback(struct nds32_v3_common_callback *callback)
334 v3_common_callback = callback;
337 /** target_type functions: */
338 /* target request support */
339 int nds32_v3_target_request_data(struct target *target,
340 uint32_t size, uint8_t *buffer)
342 /* AndesCore could use DTR register to communicate with OpenOCD
343 * to output messages
344 * Target data will be put in buffer
345 * The format of DTR is as follow
346 * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
347 * target_req_cmd has three possible values:
348 * TARGET_REQ_TRACEMSG
349 * TARGET_REQ_DEBUGMSG
350 * TARGET_REQ_DEBUGCHAR
351 * if size == 0, target will call target_asciimsg(),
352 * else call target_hexmsg()
354 LOG_WARNING("Not implemented: %s", __func__);
356 return ERROR_OK;
359 int nds32_v3_checksum_memory(struct target *target,
360 target_addr_t address, uint32_t count, uint32_t *checksum)
362 LOG_WARNING("Not implemented: %s", __func__);
364 return ERROR_FAIL;
368 * find out which watchpoint hits
369 * get exception address and compare the address to watchpoints
371 int nds32_v3_hit_watchpoint(struct target *target,
372 struct watchpoint **hit_watchpoint)
374 static struct watchpoint scan_all_watchpoint;
376 uint32_t exception_address;
377 struct watchpoint *wp;
378 struct nds32 *nds32 = target_to_nds32(target);
380 exception_address = nds32->watched_address;
382 if (exception_address == 0xFFFFFFFF)
383 return ERROR_FAIL;
385 if (exception_address == 0) {
386 scan_all_watchpoint.address = 0;
387 scan_all_watchpoint.rw = WPT_WRITE;
388 scan_all_watchpoint.next = 0;
389 scan_all_watchpoint.unique_id = 0x5CA8;
391 *hit_watchpoint = &scan_all_watchpoint;
392 return ERROR_OK;
395 for (wp = target->watchpoints; wp; wp = wp->next) {
396 if (((exception_address ^ wp->address) & (~wp->mask)) == 0) {
397 *hit_watchpoint = wp;
399 return ERROR_OK;
403 return ERROR_FAIL;
406 int nds32_v3_target_create_common(struct target *target, struct nds32 *nds32)
408 nds32->register_map = nds32_v3_register_mapping;
409 nds32->get_debug_reason = nds32_v3_get_debug_reason;
410 nds32->enter_debug_state = nds32_v3_debug_entry;
411 nds32->leave_debug_state = nds32_v3_leave_debug_state;
412 nds32->get_watched_address = nds32_v3_get_exception_address;
414 /* Init target->arch_info in nds32_init_arch_info().
415 * After this, user could use target_to_nds32() to get nds32 object */
416 nds32_init_arch_info(target, nds32);
418 return ERROR_OK;
421 int nds32_v3_run_algorithm(struct target *target,
422 int num_mem_params,
423 struct mem_param *mem_params,
424 int num_reg_params,
425 struct reg_param *reg_params,
426 target_addr_t entry_point,
427 target_addr_t exit_point,
428 int timeout_ms,
429 void *arch_info)
431 LOG_WARNING("Not implemented: %s", __func__);
433 return ERROR_FAIL;
436 int nds32_v3_read_buffer(struct target *target, target_addr_t address,
437 uint32_t size, uint8_t *buffer)
439 struct nds32 *nds32 = target_to_nds32(target);
440 struct nds32_memory *memory = &(nds32->memory);
442 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
443 (target->state != TARGET_HALTED)) {
444 LOG_WARNING("target was not halted");
445 return ERROR_TARGET_NOT_HALTED;
448 target_addr_t physical_address;
449 /* BUG: If access range crosses multiple pages, the translation will not correct
450 * for second page or so. */
452 /* When DEX is set to one, hardware will enforce the following behavior without
453 * modifying the corresponding control bits in PSW.
455 * Disable all interrupts
456 * Become superuser mode
457 * Turn off IT/DT
458 * Use MMU_CFG.DE as the data access endian
459 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
460 * Disable audio special features
461 * Disable inline function call
463 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
464 * to physical address.
466 if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
467 address = physical_address;
468 else
469 return ERROR_FAIL;
471 int result;
472 struct aice_port_s *aice = target_to_aice(target);
473 /* give arbitrary initial value to avoid warning messages */
474 enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
476 if (nds32->hit_syscall) {
477 /* Use bus mode to access memory during virtual hosting */
478 origin_access_channel = memory->access_channel;
479 memory->access_channel = NDS_MEMORY_ACC_BUS;
480 aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
483 result = nds32_read_buffer(target, address, size, buffer);
485 if (nds32->hit_syscall) {
486 /* Restore access_channel after virtual hosting */
487 memory->access_channel = origin_access_channel;
488 aice_memory_access(aice, origin_access_channel);
491 return result;
494 int nds32_v3_write_buffer(struct target *target, target_addr_t address,
495 uint32_t size, const uint8_t *buffer)
497 struct nds32 *nds32 = target_to_nds32(target);
498 struct nds32_memory *memory = &(nds32->memory);
500 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
501 (target->state != TARGET_HALTED)) {
502 LOG_WARNING("target was not halted");
503 return ERROR_TARGET_NOT_HALTED;
506 target_addr_t physical_address;
507 /* BUG: If access range crosses multiple pages, the translation will not correct
508 * for second page or so. */
510 /* When DEX is set to one, hardware will enforce the following behavior without
511 * modifying the corresponding control bits in PSW.
513 * Disable all interrupts
514 * Become superuser mode
515 * Turn off IT/DT
516 * Use MMU_CFG.DE as the data access endian
517 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
518 * Disable audio special features
519 * Disable inline function call
521 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
522 * to physical address.
524 if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
525 address = physical_address;
526 else
527 return ERROR_FAIL;
529 if (nds32->hit_syscall) {
530 struct aice_port_s *aice = target_to_aice(target);
531 enum nds_memory_access origin_access_channel;
532 origin_access_channel = memory->access_channel;
534 /* If target has no cache, use BUS mode to access memory. */
535 if ((memory->dcache.line_size == 0)
536 || (memory->dcache.enable == false)) {
537 /* There is no Dcache or Dcache is disabled. */
538 memory->access_channel = NDS_MEMORY_ACC_BUS;
539 aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
542 int result;
543 result = nds32_gdb_fileio_write_memory(nds32, address, size, buffer);
545 if (origin_access_channel == NDS_MEMORY_ACC_CPU) {
546 memory->access_channel = NDS_MEMORY_ACC_CPU;
547 aice_memory_access(aice, NDS_MEMORY_ACC_CPU);
550 return result;
553 return nds32_write_buffer(target, address, size, buffer);
556 int nds32_v3_read_memory(struct target *target, target_addr_t address,
557 uint32_t size, uint32_t count, uint8_t *buffer)
559 struct nds32 *nds32 = target_to_nds32(target);
560 struct nds32_memory *memory = &(nds32->memory);
562 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
563 (target->state != TARGET_HALTED)) {
564 LOG_WARNING("target was not halted");
565 return ERROR_TARGET_NOT_HALTED;
568 target_addr_t physical_address;
569 /* BUG: If access range crosses multiple pages, the translation will not correct
570 * for second page or so. */
572 /* When DEX is set to one, hardware will enforce the following behavior without
573 * modifying the corresponding control bits in PSW.
575 * Disable all interrupts
576 * Become superuser mode
577 * Turn off IT/DT
578 * Use MMU_CFG.DE as the data access endian
579 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
580 * Disable audio special features
581 * Disable inline function call
583 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
584 * to physical address.
586 if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
587 address = physical_address;
588 else
589 return ERROR_FAIL;
591 struct aice_port_s *aice = target_to_aice(target);
592 /* give arbitrary initial value to avoid warning messages */
593 enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
594 int result;
596 if (nds32->hit_syscall) {
597 /* Use bus mode to access memory during virtual hosting */
598 origin_access_channel = memory->access_channel;
599 memory->access_channel = NDS_MEMORY_ACC_BUS;
600 aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
603 result = nds32_read_memory(target, address, size, count, buffer);
605 if (nds32->hit_syscall) {
606 /* Restore access_channel after virtual hosting */
607 memory->access_channel = origin_access_channel;
608 aice_memory_access(aice, origin_access_channel);
611 return result;
614 int nds32_v3_write_memory(struct target *target, target_addr_t address,
615 uint32_t size, uint32_t count, const uint8_t *buffer)
617 struct nds32 *nds32 = target_to_nds32(target);
618 struct nds32_memory *memory = &(nds32->memory);
620 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
621 (target->state != TARGET_HALTED)) {
622 LOG_WARNING("target was not halted");
623 return ERROR_TARGET_NOT_HALTED;
626 target_addr_t physical_address;
627 /* BUG: If access range crosses multiple pages, the translation will not correct
628 * for second page or so. */
630 /* When DEX is set to one, hardware will enforce the following behavior without
631 * modifying the corresponding control bits in PSW.
633 * Disable all interrupts
634 * Become superuser mode
635 * Turn off IT/DT
636 * Use MMU_CFG.DE as the data access endian
637 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
638 * Disable audio special features
639 * Disable inline function call
641 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
642 * to physical address.
644 if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
645 address = physical_address;
646 else
647 return ERROR_FAIL;
649 return nds32_write_memory(target, address, size, count, buffer);
652 int nds32_v3_init_target(struct command_context *cmd_ctx,
653 struct target *target)
655 /* Initialize anything we can set up without talking to the target */
656 struct nds32 *nds32 = target_to_nds32(target);
658 nds32_init(nds32);
660 target->fileio_info = malloc(sizeof(struct gdb_fileio_info));
661 target->fileio_info->identifier = NULL;
663 return ERROR_OK;