1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
25 #include "breakpoints.h"
26 #include "nds32_reg.h"
27 #include "nds32_disassembler.h"
29 #include "nds32_aice.h"
30 #include "nds32_v3_common.h"
/* Software breakpoint planted at the kernel's syscall entry to implement
 * virtual hosting (semihosting-style syscall interception).
 * NOTE(review): the initializer list after "= {" is missing from this
 * extraction — confirm against the original file. */
32 static struct breakpoint syscall_breakpoint
= {
/* Callback table registered via nds32_v3_common_register_callback().
 * Used throughout this file to (de)activate hardware breakpoints and
 * watchpoints and to manage the interrupt stack. */
44 static struct nds32_v3_common_callback
*v3_common_callback
;
/* Map an OpenOCD register number to the v3 hardware register number.
 * Installed as nds32->register_map in nds32_v3_target_create_common().
 * NOTE(review): the function body is missing from this extraction. */
46 static int nds32_v3_register_mapping(struct nds32
*nds32
, int reg_no
)
/* Determine why the core entered debug mode: read the EDMSW register
 * through the AICE adapter and return bits [15:12] as the reason code.
 * NOTE(review): the local 'edmsw' declaration, braces and return are
 * missing from this extraction — confirm against the original file. */
54 static int nds32_v3_get_debug_reason(struct nds32
*nds32
, uint32_t *reason
)
/* Obtain the AICE port handle associated with this target. */
57 struct aice_port_s
*aice
= target_to_aice(nds32
->target
);
58 aice_read_debug_reg(aice
, NDS_EDM_SR_EDMSW
, &edmsw
);
/* reason = EDMSW[15:12] */
60 *reason
= (edmsw
>> 12) & 0x0F;
66 * Save processor state. This is called after a HALT instruction
67 * succeeds, and on other occasions the processor enters debug mode
68 * (breakpoint, watchpoint, etc).
/* Debug-mode entry handler: called after the core halts (HALT, breakpoint,
 * watchpoint, ...).  Initializes arch info on first halt, invalidates the
 * register cache, deactivates HW break/watchpoints so the debugger's own
 * resources don't retrigger, handles the virtual-hosting syscall
 * breakpoint, saves the full register context, and checks the interrupt
 * stack via the registered callback.
 * NOTE(review): several lines (local declarations such as 'value_pc',
 * braces and the return path) are missing from this extraction. */
70 static int nds32_v3_debug_entry(struct nds32
*nds32
, bool enable_watchpoint
)
72 LOG_DEBUG("nds32_v3_debug_entry");
/* Remember the previous state so it can be restored if
 * nds32_examine_debug_reason() fails below. */
74 enum target_state backup_state
= nds32
->target
->state
;
75 nds32
->target
->state
= TARGET_HALTED
;
77 if (nds32
->init_arch_info_after_halted
== false) {
78 /* init architecture info according to config registers */
79 CHECK_RETVAL(nds32_config(nds32
));
81 nds32
->init_arch_info_after_halted
= true;
84 /* REVISIT entire cache should already be invalid !!! */
85 register_cache_invalidate(nds32
->core_cache
);
87 /* deactivate all hardware breakpoints */
88 CHECK_RETVAL(v3_common_callback
->deactivate_hardware_breakpoint(nds32
->target
));
90 if (enable_watchpoint
)
91 CHECK_RETVAL(v3_common_callback
->deactivate_hardware_watchpoint(nds32
->target
));
93 if (nds32
->virtual_hosting
) {
94 if (syscall_breakpoint
.set
) {
95 /** disable virtual hosting */
97 /* remove breakpoint at syscall entry */
98 target_remove_breakpoint(nds32
->target
, &syscall_breakpoint
);
99 syscall_breakpoint
.set
= 0;
/* If we halted exactly at the syscall entry, flag it so the
 * syscall can be serviced on resume. */
102 nds32_get_mapped_reg(nds32
, PC
, &value_pc
);
103 if (value_pc
== syscall_breakpoint
.address
)
104 /** process syscall for virtual hosting */
105 nds32
->hit_syscall
= true;
/* On failure to classify the debug reason, roll back the target
 * state and re-arm the hardware break/watchpoints. */
109 if (ERROR_OK
!= nds32_examine_debug_reason(nds32
)) {
110 nds32
->target
->state
= backup_state
;
112 /* re-activate all hardware breakpoints & watchpoints */
113 CHECK_RETVAL(v3_common_callback
->activate_hardware_breakpoint(nds32
->target
));
115 if (enable_watchpoint
)
116 CHECK_RETVAL(v3_common_callback
->activate_hardware_watchpoint(nds32
->target
));
121 /* Save registers. */
122 nds32_full_context(nds32
);
124 /* check interrupt level */
125 v3_common_callback
->check_interrupt_stack(nds32
);
131 * Restore processor state.
/* Debug-mode exit handler: re-arms HW break/watchpoints, restores the
 * interrupt stack and register context, and — when virtual hosting is
 * enabled — skips the pending syscall (via two IRETs executed in DIM,
 * using HW breakpoint 0 as a temporary stop point) and re-plants the
 * software breakpoint at the syscall entry.
 * NOTE(review): local declarations ('value_ir3', 'entry_size',
 * 'value_ir6', 'syscall_id', 'value_ir9'), braces and the return path
 * are missing from this extraction. */
133 static int nds32_v3_leave_debug_state(struct nds32
*nds32
, bool enable_watchpoint
)
135 LOG_DEBUG("nds32_v3_leave_debug_state");
137 struct target
*target
= nds32
->target
;
139 /* activate all hardware breakpoints */
140 CHECK_RETVAL(v3_common_callback
->activate_hardware_breakpoint(target
));
142 if (enable_watchpoint
) {
143 /* activate all watchpoints */
144 CHECK_RETVAL(v3_common_callback
->activate_hardware_watchpoint(target
));
147 /* restore interrupt stack */
148 v3_common_callback
->restore_interrupt_stack(nds32
);
150 /* REVISIT once we start caring about MMU and cache state,
151 * address it here ...
154 /* restore PSW, PC, and R0 ... after flushing any modified
157 CHECK_RETVAL(nds32_restore_context(target
));
159 if (nds32
->virtual_hosting
) {
160 /** enable virtual hosting */
163 uint32_t syscall_address
;
165 /* get syscall entry address */
166 nds32_get_mapped_reg(nds32
, IR3
, &value_ir3
);
/* Entry size encoded in IR3[15:14]: 4 << (2 * field). */
167 entry_size
= 0x4 << (((value_ir3
>> 14) & 0x3) << 1);
168 syscall_address
= (value_ir3
& 0xFFFF0000) + entry_size
* 8; /* The index of SYSCALL is 8 */
170 if (nds32
->hit_syscall
) {
171 /* single step to skip syscall entry */
172 /* use IRET to skip syscall */
173 struct aice_port_s
*aice
= target_to_aice(target
);
/* Syscall id lives in IR6[30:16]. */
178 nds32_get_mapped_reg(nds32
, IR6
, &value_ir6
);
179 syscall_id
= (value_ir6
>> 16) & 0x7FFF;
181 if (syscall_id
== NDS32_SYSCALL_EXIT
) {
182 /* If target hits exit syscall, do not use IRET to skip handler. */
185 /* use api->read/write_reg to skip nds32 register cache */
186 uint32_t value_dimbr
;
187 aice_read_debug_reg(aice
, NDS_EDM_SR_DIMBR
, &value_dimbr
);
188 aice_write_register(aice
, IR11
, value_dimbr
+ 0xC);
190 aice_read_register(aice
, IR9
, &value_ir9
);
191 value_ir9
+= 4; /* syscall is always 4 bytes */
192 aice_write_register(aice
, IR9
, value_ir9
);
194 /* backup hardware breakpoint 0 */
195 uint32_t backup_bpa
, backup_bpam
, backup_bpc
;
196 aice_read_debug_reg(aice
, NDS_EDM_SR_BPA0
, &backup_bpa
);
197 aice_read_debug_reg(aice
, NDS_EDM_SR_BPAM0
, &backup_bpam
);
198 aice_read_debug_reg(aice
, NDS_EDM_SR_BPC0
, &backup_bpc
);
200 /* use hardware breakpoint 0 to stop cpu after skipping syscall */
201 aice_write_debug_reg(aice
, NDS_EDM_SR_BPA0
, value_ir9
);
202 aice_write_debug_reg(aice
, NDS_EDM_SR_BPAM0
, 0);
203 aice_write_debug_reg(aice
, NDS_EDM_SR_BPC0
, 0xA);
206 * First IRET is used to quit debug mode.
207 * Second IRET is used to quit current syscall. */
208 uint32_t dim_inst
[4] = {NOP
, NOP
, IRET
, IRET
};
209 aice_execute(aice
, dim_inst
, 4);
211 /* restore origin hardware breakpoint 0 */
212 aice_write_debug_reg(aice
, NDS_EDM_SR_BPA0
, backup_bpa
);
213 aice_write_debug_reg(aice
, NDS_EDM_SR_BPAM0
, backup_bpam
);
214 aice_write_debug_reg(aice
, NDS_EDM_SR_BPC0
, backup_bpc
);
217 nds32
->hit_syscall
= false;
220 /* insert breakpoint at syscall entry */
221 syscall_breakpoint
.address
= syscall_address
;
222 syscall_breakpoint
.type
= BKPT_SOFT
;
223 syscall_breakpoint
.set
= 1;
224 target_add_breakpoint(target
, &syscall_breakpoint
);
/* Work out the data address that raised a watchpoint/exception.  Reads
 * EDMSW match bits to see which HW breakpoint channels fired, reads the
 * matching BPA register for the address, and for imprecise/ambiguous
 * matches disassembles the instruction at PC to narrow the access range.
 * Falls back to parsing the instruction for global-stop, or 0xFFFFFFFF
 * ("scan all watchpoints") when nothing can be determined.
 * NOTE(review): several local declarations ('edm_cfg', 'edmsw',
 * 'match_bits', 'i', 'bp_control', 'val_pc', 'opcode', 'watch_end'),
 * braces and returns are missing from this extraction. */
230 static int nds32_v3_get_exception_address(struct nds32
*nds32
,
231 uint32_t *address
, uint32_t reason
)
233 LOG_DEBUG("nds32_v3_get_exception_address");
235 struct aice_port_s
*aice
= target_to_aice(nds32
->target
);
236 struct target
*target
= nds32
->target
;
240 uint32_t match_count
;
/* Cached across calls; probed from EDM_CFG on first use. */
242 static int32_t number_of_hard_break
;
245 if (number_of_hard_break
== 0) {
246 aice_read_debug_reg(aice
, NDS_EDM_SR_EDM_CFG
, &edm_cfg
);
247 number_of_hard_break
= (edm_cfg
& 0x7) + 1;
250 aice_read_debug_reg(aice
, NDS_EDM_SR_EDMSW
, &edmsw
);
251 /* clear matching bits (write-one-clear) */
252 aice_write_debug_reg(aice
, NDS_EDM_SR_EDMSW
, edmsw
);
/* Match bits for the HW breakpoint channels live in EDMSW[11:4]. */
253 match_bits
= (edmsw
>> 4) & 0xFF;
255 for (i
= 0 ; i
< number_of_hard_break
; i
++) {
256 if (match_bits
& (1 << i
)) {
257 aice_read_debug_reg(aice
, NDS_EDM_SR_BPA0
+ i
, address
);
260 /* If target hits multiple read/access watchpoint,
261 * select the first one. */
262 aice_read_debug_reg(aice
, NDS_EDM_SR_BPC0
+ i
, &bp_control
);
263 if (0x3 == (bp_control
& 0x3)) {
270 if (match_count
> 1) { /* multiple hits */
273 } else if (match_count
== 1) {
276 struct nds32_instruction instruction
;
277 struct watchpoint
*wp
;
280 nds32_get_mapped_reg(nds32
, PC
, &val_pc
);
282 if ((NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE
== reason
) ||
283 (NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE
== reason
)) {
284 if (edmsw
& 0x4) /* check EDMSW.IS_16BIT */
/* Decode the faulting instruction to get its access range. */
290 nds32_read_opcode(nds32
, val_pc
, &opcode
);
291 nds32_evaluate_opcode(nds32
, opcode
, val_pc
, &instruction
);
293 LOG_DEBUG("PC: 0x%08x, access start: 0x%08x, end: 0x%08x", val_pc
,
294 instruction
.access_start
, instruction
.access_end
);
296 /* check if multiple hits in the access range */
297 uint32_t in_range_watch_count
= 0;
298 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
299 if ((instruction
.access_start
<= wp
->address
) &&
300 (wp
->address
< instruction
.access_end
))
301 in_range_watch_count
++;
303 if (in_range_watch_count
> 1) {
304 /* Hit LSMW instruction. */
309 /* dispel false match */
311 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
312 if (((*address
^ wp
->address
) & (~wp
->mask
)) == 0) {
313 uint32_t watch_start
;
316 watch_start
= wp
->address
;
317 watch_end
= wp
->address
+ wp
->length
;
/* No overlap with the instruction's access range => false match. */
319 if ((watch_end
<= instruction
.access_start
) ||
320 (instruction
.access_end
<= watch_start
))
332 } else if (match_count
== 0) {
333 /* global stop is precise exception */
334 if ((NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP
== reason
) && nds32
->global_stop
) {
335 /* parse instruction to get correct access address */
338 struct nds32_instruction instruction
;
340 nds32_get_mapped_reg(nds32
, PC
, &val_pc
);
341 nds32_read_opcode(nds32
, val_pc
, &opcode
);
342 nds32_evaluate_opcode(nds32
, opcode
, val_pc
, &instruction
);
344 *address
= instruction
.access_start
;
/* Sentinel: address could not be determined. */
350 *address
= 0xFFFFFFFF;
/* Register the v3/v3m back-end's callback table; stored in the file-scope
 * v3_common_callback pointer used by the handlers above. */
354 void nds32_v3_common_register_callback(struct nds32_v3_common_callback
*callback
)
356 v3_common_callback
= callback
;
359 /** target_type functions: */
360 /* target request support */
/* target_type hook: fetch target-request data over DTR.
 * Currently a stub that only logs a warning.
 * NOTE(review): the return statement is missing from this extraction. */
361 int nds32_v3_target_request_data(struct target
*target
,
362 uint32_t size
, uint8_t *buffer
)
364 /* AndesCore could use DTR register to communicate with OpenOCD
366 * Target data will be put in buffer
367 * The format of DTR is as follow
368 * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
369 * target_req_cmd has three possible values:
370 * TARGET_REQ_TRACEMSG
371 * TARGET_REQ_DEBUGMSG
372 * TARGET_REQ_DEBUGCHAR
373 * if size == 0, target will call target_asciimsg(),
374 * else call target_hexmsg()
376 LOG_WARNING("Not implemented: %s", __func__
);
/* target_type hook: soft reset + halt, implemented by asserting SRST
 * with hold via the AICE adapter. */
381 int nds32_v3_soft_reset_halt(struct target
*target
)
383 struct aice_port_s
*aice
= target_to_aice(target
);
384 return aice_assert_srst(aice
, AICE_RESET_HOLD
);
/* target_type hook: checksum a memory region.  Stub — logs a warning.
 * NOTE(review): the return statement is missing from this extraction. */
387 int nds32_v3_checksum_memory(struct target
*target
,
388 uint32_t address
, uint32_t count
, uint32_t *checksum
)
390 LOG_WARNING("Not implemented: %s", __func__
);
396 * find out which watchpoint hits
397 * get exception address and compare the address to watchpoints
/* target_type hook: identify which watchpoint was hit by comparing the
 * recorded exception address (nds32->watched_address) against the
 * target's watchpoint list.  An address of 0 yields a synthetic
 * "scan all" watchpoint; 0xFFFFFFFF means unknown.
 * NOTE(review): braces and return statements are missing from this
 * extraction. */
399 int nds32_v3_hit_watchpoint(struct target
*target
,
400 struct watchpoint
**hit_watchpoint
)
402 static struct watchpoint scan_all_watchpoint
;
404 uint32_t exception_address
;
405 struct watchpoint
*wp
;
406 struct nds32
*nds32
= target_to_nds32(target
);
408 exception_address
= nds32
->watched_address
;
410 if (exception_address
== 0xFFFFFFFF)
413 if (exception_address
== 0) {
/* Synthesize a placeholder watchpoint so the caller scans all. */
414 scan_all_watchpoint
.address
= 0;
415 scan_all_watchpoint
.rw
= WPT_WRITE
;
416 scan_all_watchpoint
.next
= 0;
417 scan_all_watchpoint
.unique_id
= 0x5CA8;
419 *hit_watchpoint
= &scan_all_watchpoint
;
/* Masked-compare the exception address against each watchpoint. */
423 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
424 if (((exception_address
^ wp
->address
) & (~wp
->mask
)) == 0) {
425 *hit_watchpoint
= wp
;
/* Common target-create path for v3/v3m: installs the v3 handler
 * functions into the nds32 object, then runs the generic arch-info
 * initialization.
 * NOTE(review): the return statement is missing from this extraction. */
434 int nds32_v3_target_create_common(struct target
*target
, struct nds32
*nds32
)
436 nds32
->register_map
= nds32_v3_register_mapping
;
437 nds32
->get_debug_reason
= nds32_v3_get_debug_reason
;
438 nds32
->enter_debug_state
= nds32_v3_debug_entry
;
439 nds32
->leave_debug_state
= nds32_v3_leave_debug_state
;
440 nds32
->get_watched_address
= nds32_v3_get_exception_address
;
442 /* Init target->arch_info in nds32_init_arch_info().
443 * After this, user could use target_to_nds32() to get nds32 object */
444 nds32_init_arch_info(target
, nds32
);
/* target_type hook: run an on-target algorithm.  Stub — logs a warning.
 * NOTE(review): some parameters and the return statement are missing
 * from this extraction. */
449 int nds32_v3_run_algorithm(struct target
*target
,
451 struct mem_param
*mem_params
,
453 struct reg_param
*reg_params
,
454 uint32_t entry_point
,
459 LOG_WARNING("Not implemented: %s", __func__
);
/* target_type hook: read a byte buffer.  Requires a halted target for
 * CPU-channel access, translates the virtual address to physical (DEX
 * turns off IT/DT), temporarily switches to BUS access during virtual
 * hosting, then delegates to nds32_read_buffer().
 * NOTE(review): braces, the 'result' declaration and the return
 * statement are missing from this extraction. */
464 int nds32_v3_read_buffer(struct target
*target
, uint32_t address
,
465 uint32_t size
, uint8_t *buffer
)
467 struct nds32
*nds32
= target_to_nds32(target
);
468 struct nds32_memory
*memory
= &(nds32
->memory
);
470 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
471 (target
->state
!= TARGET_HALTED
)) {
472 LOG_WARNING("target was not halted");
473 return ERROR_TARGET_NOT_HALTED
;
476 uint32_t physical_address
;
477 /* BUG: If access range crosses multiple pages, the translation will not correct
478 * for second page or so. */
480 /* When DEX is set to one, hardware will enforce the following behavior without
481 * modifying the corresponding control bits in PSW.
483 * Disable all interrupts
484 * Become superuser mode
486 * Use MMU_CFG.DE as the data access endian
487 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
488 * Disable audio special features
489 * Disable inline function call
491 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
492 * to physical address.
494 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
495 address
= physical_address
;
500 struct aice_port_s
*aice
= target_to_aice(target
);
501 /* give arbitrary initial value to avoid warning messages */
502 enum nds_memory_access origin_access_channel
= NDS_MEMORY_ACC_CPU
;
504 if (nds32
->hit_syscall
) {
505 /* Use bus mode to access memory during virtual hosting */
506 origin_access_channel
= memory
->access_channel
;
507 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
508 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
511 result
= nds32_read_buffer(target
, address
, size
, buffer
);
513 if (nds32
->hit_syscall
) {
514 /* Restore access_channel after virtual hosting */
515 memory
->access_channel
= origin_access_channel
;
516 aice_memory_access(aice
, origin_access_channel
);
/* target_type hook: write a byte buffer.  Mirrors nds32_v3_read_buffer:
 * halted-target check for CPU-channel access, virt→phys translation,
 * BUS-mode access during virtual hosting (routed through
 * nds32_gdb_fileio_write_memory), otherwise nds32_write_buffer().
 * NOTE(review): braces, the 'result' declaration and part of the
 * virtual-hosting return path are missing from this extraction. */
522 int nds32_v3_write_buffer(struct target
*target
, uint32_t address
,
523 uint32_t size
, const uint8_t *buffer
)
525 struct nds32
*nds32
= target_to_nds32(target
);
526 struct nds32_memory
*memory
= &(nds32
->memory
);
528 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
529 (target
->state
!= TARGET_HALTED
)) {
530 LOG_WARNING("target was not halted");
531 return ERROR_TARGET_NOT_HALTED
;
534 uint32_t physical_address
;
535 /* BUG: If access range crosses multiple pages, the translation will not correct
536 * for second page or so. */
538 /* When DEX is set to one, hardware will enforce the following behavior without
539 * modifying the corresponding control bits in PSW.
541 * Disable all interrupts
542 * Become superuser mode
544 * Use MMU_CFG.DE as the data access endian
545 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
546 * Disable audio special features
547 * Disable inline function call
549 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
550 * to physical address.
552 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
553 address
= physical_address
;
557 if (nds32
->hit_syscall
) {
558 /* Use bus mode to access memory during virtual hosting */
559 struct aice_port_s
*aice
= target_to_aice(target
);
560 enum nds_memory_access origin_access_channel
;
563 origin_access_channel
= memory
->access_channel
;
564 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
565 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
567 result
= nds32_gdb_fileio_write_memory(nds32
, address
, size
, buffer
);
569 memory
->access_channel
= origin_access_channel
;
570 aice_memory_access(aice
, origin_access_channel
);
575 return nds32_write_buffer(target
, address
, size
, buffer
);
/* target_type hook: read 'count' items of 'size' bytes.  Same pattern as
 * nds32_v3_read_buffer (halted check, virt→phys, BUS mode during virtual
 * hosting) delegating to nds32_read_memory().
 * NOTE(review): braces, the 'result' declaration and the return
 * statement are missing from this extraction. */
578 int nds32_v3_read_memory(struct target
*target
, uint32_t address
,
579 uint32_t size
, uint32_t count
, uint8_t *buffer
)
581 struct nds32
*nds32
= target_to_nds32(target
);
582 struct nds32_memory
*memory
= &(nds32
->memory
);
584 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
585 (target
->state
!= TARGET_HALTED
)) {
586 LOG_WARNING("target was not halted");
587 return ERROR_TARGET_NOT_HALTED
;
590 uint32_t physical_address
;
591 /* BUG: If access range crosses multiple pages, the translation will not correct
592 * for second page or so. */
594 /* When DEX is set to one, hardware will enforce the following behavior without
595 * modifying the corresponding control bits in PSW.
597 * Disable all interrupts
598 * Become superuser mode
600 * Use MMU_CFG.DE as the data access endian
601 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
602 * Disable audio special features
603 * Disable inline function call
605 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
606 * to physical address.
608 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
609 address
= physical_address
;
613 struct aice_port_s
*aice
= target_to_aice(target
);
614 /* give arbitrary initial value to avoid warning messages */
615 enum nds_memory_access origin_access_channel
= NDS_MEMORY_ACC_CPU
;
618 if (nds32
->hit_syscall
) {
619 /* Use bus mode to access memory during virtual hosting */
620 origin_access_channel
= memory
->access_channel
;
621 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
622 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
625 result
= nds32_read_memory(target
, address
, size
, count
, buffer
);
627 if (nds32
->hit_syscall
) {
628 /* Restore access_channel after virtual hosting */
629 memory
->access_channel
= origin_access_channel
;
630 aice_memory_access(aice
, origin_access_channel
);
/* target_type hook: write 'count' items of 'size' bytes.  Halted-target
 * check for CPU-channel access and virt→phys translation, then delegates
 * to nds32_write_memory().
 * NOTE(review): braces around the guard/translation sections are missing
 * from this extraction. */
636 int nds32_v3_write_memory(struct target
*target
, uint32_t address
,
637 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
639 struct nds32
*nds32
= target_to_nds32(target
);
640 struct nds32_memory
*memory
= &(nds32
->memory
);
642 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
643 (target
->state
!= TARGET_HALTED
)) {
644 LOG_WARNING("target was not halted");
645 return ERROR_TARGET_NOT_HALTED
;
648 uint32_t physical_address
;
649 /* BUG: If access range crosses multiple pages, the translation will not correct
650 * for second page or so. */
652 /* When DEX is set to one, hardware will enforce the following behavior without
653 * modifying the corresponding control bits in PSW.
655 * Disable all interrupts
656 * Become superuser mode
658 * Use MMU_CFG.DE as the data access endian
659 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
660 * Disable audio special features
661 * Disable inline function call
663 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
664 * to physical address.
666 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
667 address
= physical_address
;
671 return nds32_write_memory(target
, address
, size
, count
, buffer
);
674 int nds32_v3_init_target(struct command_context
*cmd_ctx
,
675 struct target
*target
)
677 /* Initialize anything we can set up without talking to the target */
678 struct nds32
*nds32
= target_to_nds32(target
);
682 target
->fileio_info
= malloc(sizeof(struct gdb_fileio_info
));
683 target
->fileio_info
->identifier
= NULL
;