/***************************************************************************
 *   Copyright (C) 2013 Andes Technology                                   *
 *   Hsiangkai Wang <hkwang@andestech.com>                                  *
 *                                                                          *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                          *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                          *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "nds32_reg.h"
#include "nds32_disassembler.h"
#include "nds32.h"
#include "nds32_aice.h"
#include "nds32_v3_common.h"

static struct nds32_v3_common_callback *v3_common_callback;

static int nds32_v3_register_mapping(struct nds32 *nds32, int reg_no)
{
	if (reg_no == PC)
		return IR11;

	return reg_no;
}

static int nds32_v3_get_debug_reason(struct nds32 *nds32, uint32_t *reason)
{
	uint32_t edmsw;
	struct aice_port_s *aice = target_to_aice(nds32->target);
	aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);

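	/* EDMSW[15:12] records why the core entered debug mode (breakpoint,
	 * watchpoint, single step, ...); hand the raw 4-bit code back for the
	 * caller to interpret. */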
	*reason = (edmsw >> 12) & 0x0F;

	return ERROR_OK;
}

/**
 * Save processor state.  This is called after a HALT instruction
 * succeeds, and on other occasions the processor enters debug mode
 * (breakpoint, watchpoint, etc).
 */
static int nds32_v3_debug_entry(struct nds32 *nds32, bool enable_watchpoint)
{
	LOG_DEBUG("nds32_v3_debug_entry");

	enum target_state backup_state = nds32->target->state;
	nds32->target->state = TARGET_HALTED;

	if (nds32->init_arch_info_after_halted == false) {
		/* init architecture info according to config registers */
		CHECK_RETVAL(nds32_config(nds32));

		nds32->init_arch_info_after_halted = true;
	}

	/* REVISIT entire cache should already be invalid !!! */
	register_cache_invalidate(nds32->core_cache);

	/* deactivate all hardware breakpoints */
	CHECK_RETVAL(v3_common_callback->deactivate_hardware_breakpoint(nds32->target));

	if (enable_watchpoint)
		CHECK_RETVAL(v3_common_callback->deactivate_hardware_watchpoint(nds32->target));

	struct breakpoint *syscall_break = &(nds32->syscall_break);
	if (nds32->virtual_hosting) {
		if (syscall_break->set) {
			/** disable virtual hosting */

			/* remove breakpoint at syscall entry */
			target_remove_breakpoint(nds32->target, syscall_break);
			syscall_break->set = 0;

			uint32_t value_pc;
			nds32_get_mapped_reg(nds32, PC, &value_pc);
			if (value_pc == syscall_break->address)
				/** process syscall for virtual hosting */
				nds32->hit_syscall = true;
		}
	}

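	/* hit_syscall is consumed in two places below: the memory accessors
	 * temporarily switch to BUS-mode access while a virtual-hosting syscall
	 * is pending, and nds32_v3_leave_debug_state() uses it to decide whether
	 * the syscall entry has to be skipped with IRET. */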
	if (ERROR_OK != nds32_examine_debug_reason(nds32)) {
		nds32->target->state = backup_state;

		/* re-activate all hardware breakpoints & watchpoints */
		CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(nds32->target));

		if (enable_watchpoint)
			CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(nds32->target));

		return ERROR_FAIL;
	}

	/* Save registers. */
	nds32_full_context(nds32);

	/* check interrupt level */
	v3_common_callback->check_interrupt_stack(nds32);

	return ERROR_OK;
}

/**
 * Restore processor state.
 */
static int nds32_v3_leave_debug_state(struct nds32 *nds32, bool enable_watchpoint)
{
	LOG_DEBUG("nds32_v3_leave_debug_state");

	struct target *target = nds32->target;

	/* activate all hardware breakpoints */
	CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(target));

	if (enable_watchpoint) {
		/* activate all watchpoints */
		CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(target));
	}

	/* restore interrupt stack */
	v3_common_callback->restore_interrupt_stack(nds32);

	/* REVISIT once we start caring about MMU and cache state,
	 * address it here ...
	 */

	/* restore PSW, PC, and R0 ... after flushing any modified
	 * registers. */
	CHECK_RETVAL(nds32_restore_context(target));

	if (nds32->virtual_hosting) {
		/** enable virtual hosting */
		uint32_t value_ir3;
		uint32_t entry_size;
		uint32_t syscall_address;

		/* get syscall entry address */
		nds32_get_mapped_reg(nds32, IR3, &value_ir3);
		entry_size = 0x4 << (((value_ir3 >> 14) & 0x3) << 1);
		syscall_address = (value_ir3 & 0xFFFF0000) + entry_size * 8; /* The index of SYSCALL is 8 */
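		/* Note: IR3 is the interrupt vector base register; its upper 16 bits
		 * give the vector table base and bits [15:14] select the per-entry
		 * size (4 << (2 * field) bytes), so the line above points at
		 * vector #8, the SYSCALL entry. */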

		if (nds32->hit_syscall) {
			/* single step to skip syscall entry */
			/* use IRET to skip syscall */
			struct aice_port_s *aice = target_to_aice(target);
			uint32_t value_ir9;
			uint32_t value_ir6;
			uint32_t syscall_id;

			nds32_get_mapped_reg(nds32, IR6, &value_ir6);
			syscall_id = (value_ir6 >> 16) & 0x7FFF;

			if (syscall_id == NDS32_SYSCALL_EXIT) {
				/* If target hits exit syscall, do not use IRET to skip handler. */
				aice_step(aice);
			} else {
				/* use api->read/write_reg to skip nds32 register cache */
				uint32_t value_dimbr;
				aice_read_debug_reg(aice, NDS_EDM_SR_DIMBR, &value_dimbr);
				aice_write_register(aice, IR11, value_dimbr + 0xC);

				aice_read_register(aice, IR9, &value_ir9);
				value_ir9 += 4; /* syscall is always 4 bytes */
				aice_write_register(aice, IR9, value_ir9);

				/* backup hardware breakpoint 0 */
				uint32_t backup_bpa, backup_bpam, backup_bpc;
				aice_read_debug_reg(aice, NDS_EDM_SR_BPA0, &backup_bpa);
				aice_read_debug_reg(aice, NDS_EDM_SR_BPAM0, &backup_bpam);
				aice_read_debug_reg(aice, NDS_EDM_SR_BPC0, &backup_bpc);

				/* use hardware breakpoint 0 to stop cpu after skipping syscall */
				aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, value_ir9);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, 0);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, 0xA);

				/* Execute two IRET.
				 * First IRET is used to quit debug mode.
				 * Second IRET is used to quit current syscall. */
				uint32_t dim_inst[4] = {NOP, NOP, IRET, IRET};
				aice_execute(aice, dim_inst, 4);

				/* restore origin hardware breakpoint 0 */
				aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, backup_bpa);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, backup_bpam);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, backup_bpc);
			}

			nds32->hit_syscall = false;
		}

		/* insert breakpoint at syscall entry */
		struct breakpoint *syscall_break = &(nds32->syscall_break);

		syscall_break->address = syscall_address;
		syscall_break->type = BKPT_SOFT;
		syscall_break->set = 1;
		target_add_breakpoint(target, syscall_break);
	}

	return ERROR_OK;
}

static int nds32_v3_get_exception_address(struct nds32 *nds32,
		uint32_t *address, uint32_t reason)
{
	LOG_DEBUG("nds32_v3_get_exception_address");

	struct aice_port_s *aice = target_to_aice(nds32->target);
	struct target *target = nds32->target;

	uint32_t edmsw;
	uint32_t edm_cfg;
	uint32_t match_bits;
	uint32_t match_count;
	int32_t i;
	static int32_t number_of_hard_break;
	uint32_t bp_control;

	if (number_of_hard_break == 0) {
		aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
		number_of_hard_break = (edm_cfg & 0x7) + 1;
	}

	aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
	/* clear matching bits (write-one-clear) */
	aice_write_debug_reg(aice, NDS_EDM_SR_EDMSW, edmsw);
	match_bits = (edmsw >> 4) & 0xFF;
	match_count = 0;
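	/* EDMSW[11:4] hold one match flag per hardware breakpoint/watchpoint;
	 * for each set bit, the corresponding BPA register carries the address
	 * that triggered the hit. */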
	for (i = 0 ; i < number_of_hard_break ; i++) {
		if (match_bits & (1 << i)) {
			aice_read_debug_reg(aice, NDS_EDM_SR_BPA0 + i, address);
			match_count++;

			/* If target hits multiple read/access watchpoint,
			 * select the first one. */
			aice_read_debug_reg(aice, NDS_EDM_SR_BPC0 + i, &bp_control);
			if (0x3 == (bp_control & 0x3)) {
				match_count = 1;
				break;
			}
		}
	}

	if (match_count > 1) { /* multiple hits */
		*address = 0;
	} else if (match_count == 1) {
		uint32_t val_pc;
		uint32_t opcode;
		struct nds32_instruction instruction;
		struct watchpoint *wp;
		bool hit;

		nds32_get_mapped_reg(nds32, PC, &val_pc);

		if ((NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE == reason) ||
				(NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE == reason)) {
			if (edmsw & 0x4) /* check EDMSW.IS_16BIT */
				val_pc -= 2;
			else
				val_pc -= 4;
		}

		nds32_read_opcode(nds32, val_pc, &opcode);
		nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);

		LOG_DEBUG("PC: 0x%08" PRIx32 ", access start: 0x%08" PRIx32 ", end: 0x%08" PRIx32,
				val_pc, instruction.access_start, instruction.access_end);

		/* check if multiple hits in the access range */
		uint32_t in_range_watch_count = 0;
		for (wp = target->watchpoints; wp; wp = wp->next) {
			if ((instruction.access_start <= wp->address) &&
					(wp->address < instruction.access_end))
				in_range_watch_count++;
		}
		if (in_range_watch_count > 1) {
			/* Hit LSMW instruction. */
			*address = 0;
			return ERROR_OK;
		}

		/* dispel false match */
		hit = false;
		for (wp = target->watchpoints; wp; wp = wp->next) {
			if (((*address ^ wp->address) & (~wp->mask)) == 0) {
				uint32_t watch_start;
				uint32_t watch_end;

				watch_start = wp->address;
				watch_end = wp->address + wp->length;

				if ((watch_end <= instruction.access_start) ||
						(instruction.access_end <= watch_start))
					continue;

				hit = true;
				break;
			}
		}

		if (hit)
			return ERROR_OK;
		else
			*address = 0xFFFFFFFF;
	} else if (match_count == 0) {
		/* global stop is precise exception */
		if ((NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP == reason) && nds32->global_stop) {
			/* parse instruction to get correct access address */
			uint32_t val_pc;
			uint32_t opcode;
			struct nds32_instruction instruction;

			nds32_get_mapped_reg(nds32, PC, &val_pc);
			nds32_read_opcode(nds32, val_pc, &opcode);
			nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);

			*address = instruction.access_start;

			return ERROR_OK;
		}

		*address = 0xFFFFFFFF;
	}

	return ERROR_OK;
}

void nds32_v3_common_register_callback(struct nds32_v3_common_callback *callback)
{
	v3_common_callback = callback;
}

/** target_type functions: */
/* target request support */
int nds32_v3_target_request_data(struct target *target,
		uint32_t size, uint8_t *buffer)
{
	/* AndesCore could use the DTR register to communicate with OpenOCD.
	 * Target data will be put in buffer.
	 * The format of DTR is as follows:
	 * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
	 * target_req_cmd has three possible values:
	 *   TARGET_REQ_TRACEMSG
	 *   TARGET_REQ_DEBUGMSG
	 *   TARGET_REQ_DEBUGCHAR
	 * if size == 0, target will call target_asciimsg(),
	 * else call target_hexmsg(). */
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_OK;
}

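/* Illustrative sketch only (not part of the original driver; the helper name
 * is made up): given the DTR layout documented above, the request word could
 * be unpacked like this before dispatching to target_asciimsg() or
 * target_hexmsg(). */
static inline void nds32_v3_example_parse_dtr(uint32_t dtr,
		uint32_t *length, uint32_t *msg_size, uint32_t *target_req_cmd)
{
	*length = (dtr >> 16) & 0xFFFF;	/* DTR[31:16] => length */
	*msg_size = (dtr >> 8) & 0xFF;	/* DTR[15:8]  => size */
	*target_req_cmd = dtr & 0xFF;	/* DTR[7:0]   => target_req_cmd */
}
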
int nds32_v3_checksum_memory(struct target *target,
		target_addr_t address, uint32_t count, uint32_t *checksum)
{
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_FAIL;
}

/**
 * find out which watchpoint hits
 * get exception address and compare the address to watchpoints
 */
int nds32_v3_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	static struct watchpoint scan_all_watchpoint;

	uint32_t exception_address;
	struct watchpoint *wp;
	struct nds32 *nds32 = target_to_nds32(target);

	exception_address = nds32->watched_address;

	if (exception_address == 0xFFFFFFFF)
		return ERROR_FAIL;

	if (exception_address == 0) {
		scan_all_watchpoint.address = 0;
		scan_all_watchpoint.rw = WPT_WRITE;
		scan_all_watchpoint.next = 0;
		scan_all_watchpoint.unique_id = 0x5CA8;

		*hit_watchpoint = &scan_all_watchpoint;
		return ERROR_OK;
	}

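	/* wp->mask marks the low-order address bits that are "don't care" in the
	 * comparison below: with address 0x2000 and mask 0x3, for example, any
	 * access in 0x2000..0x2003 is reported as a hit. */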
	for (wp = target->watchpoints; wp; wp = wp->next) {
		if (((exception_address ^ wp->address) & (~wp->mask)) == 0) {
			*hit_watchpoint = wp;

			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}

int nds32_v3_target_create_common(struct target *target, struct nds32 *nds32)
{
	nds32->register_map = nds32_v3_register_mapping;
	nds32->get_debug_reason = nds32_v3_get_debug_reason;
	nds32->enter_debug_state = nds32_v3_debug_entry;
	nds32->leave_debug_state = nds32_v3_leave_debug_state;
	nds32->get_watched_address = nds32_v3_get_exception_address;

	/* Init target->arch_info in nds32_init_arch_info().
	 * After this, user could use target_to_nds32() to get nds32 object */
	nds32_init_arch_info(target, nds32);

	return ERROR_OK;
}

int nds32_v3_run_algorithm(struct target *target,
		int num_mem_params,
		struct mem_param *mem_params,
		int num_reg_params,
		struct reg_param *reg_params,
		target_addr_t entry_point,
		target_addr_t exit_point,
		int timeout_ms,
		void *arch_info)
{
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_FAIL;
}

int nds32_v3_read_buffer(struct target *target, target_addr_t address,
		uint32_t size, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If access range crosses multiple pages, the translation will not be correct
	 * for the second page or so. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Become superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
	 * to physical address. */
	if (ERROR_OK == target->type->virt2phys(target, address, &physical_address))
		address = physical_address;
	else
		return ERROR_FAIL;

	int result;
	struct aice_port_s *aice = target_to_aice(target);
	/* give arbitrary initial value to avoid warning messages */
	enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;

	if (nds32->hit_syscall) {
		/* Use bus mode to access memory during virtual hosting */
		origin_access_channel = memory->access_channel;
		memory->access_channel = NDS_MEMORY_ACC_BUS;
		aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
	}

	result = nds32_read_buffer(target, address, size, buffer);

	if (nds32->hit_syscall) {
		/* Restore access_channel after virtual hosting */
		memory->access_channel = origin_access_channel;
		aice_memory_access(aice, origin_access_channel);
	}

	return result;
}

int nds32_v3_write_buffer(struct target *target, target_addr_t address,
		uint32_t size, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If access range crosses multiple pages, the translation will not be correct
	 * for the second page or so. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Become superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
	 * to physical address. */
	if (ERROR_OK == target->type->virt2phys(target, address, &physical_address))
		address = physical_address;
	else
		return ERROR_FAIL;

	if (nds32->hit_syscall) {
		struct aice_port_s *aice = target_to_aice(target);
		enum nds_memory_access origin_access_channel;
		origin_access_channel = memory->access_channel;

		/* If target has no cache, use BUS mode to access memory. */
		if ((memory->dcache.line_size == 0)
			|| (memory->dcache.enable == false)) {
			/* There is no Dcache or Dcache is disabled. */
			memory->access_channel = NDS_MEMORY_ACC_BUS;
			aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
		}

		int result;
		result = nds32_gdb_fileio_write_memory(nds32, address, size, buffer);

		if (NDS_MEMORY_ACC_CPU == origin_access_channel) {
			memory->access_channel = NDS_MEMORY_ACC_CPU;
			aice_memory_access(aice, NDS_MEMORY_ACC_CPU);
		}

		return result;
	}

	return nds32_write_buffer(target, address, size, buffer);
}

int nds32_v3_read_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If access range crosses multiple pages, the translation will not be correct
	 * for the second page or so. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Become superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
	 * to physical address. */
	if (ERROR_OK == target->type->virt2phys(target, address, &physical_address))
		address = physical_address;
	else
		return ERROR_FAIL;

	struct aice_port_s *aice = target_to_aice(target);
	/* give arbitrary initial value to avoid warning messages */
	enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
	int result;

	if (nds32->hit_syscall) {
		/* Use bus mode to access memory during virtual hosting */
		origin_access_channel = memory->access_channel;
		memory->access_channel = NDS_MEMORY_ACC_BUS;
		aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
	}

	result = nds32_read_memory(target, address, size, count, buffer);

	if (nds32->hit_syscall) {
		/* Restore access_channel after virtual hosting */
		memory->access_channel = origin_access_channel;
		aice_memory_access(aice, origin_access_channel);
	}

	return result;
}

int nds32_v3_write_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If access range crosses multiple pages, the translation will not be correct
	 * for the second page or so. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Become superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
	 * to physical address. */
	if (ERROR_OK == target->type->virt2phys(target, address, &physical_address))
		address = physical_address;
	else
		return ERROR_FAIL;

	return nds32_write_memory(target, address, size, count, buffer);
}

int nds32_v3_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* Initialize anything we can set up without talking to the target */
	struct nds32 *nds32 = target_to_nds32(target);

	nds32_init(nds32);

	target->fileio_info = malloc(sizeof(struct gdb_fileio_info));
	target->fileio_info->identifier = NULL;

	return ERROR_OK;
}