1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
25 #include "breakpoints.h"
26 #include "nds32_reg.h"
27 #include "nds32_disassembler.h"
29 #include "nds32_aice.h"
30 #include "nds32_v3_common.h"
/* Core-family-specific hook table, installed via
 * nds32_v3_common_register_callback() and used throughout this file to
 * (de)activate hardware break/watchpoints and to track the interrupt
 * stack while entering/leaving debug state. */
32 static struct nds32_v3_common_callback
*v3_common_callback
;
/* Register-number remapping hook (installed as nds32->register_map in
 * nds32_v3_target_create_common()).
 * NOTE(review): the body of this function is not visible in this
 * extraction -- presumably it remaps reg_no to the hardware register
 * used while halted; confirm against the full source file. */
34 static int nds32_v3_register_mapping(struct nds32
*nds32
, int reg_no
)
42 static int nds32_v3_get_debug_reason(struct nds32
*nds32
, uint32_t *reason
)
45 struct aice_port_s
*aice
= target_to_aice(nds32
->target
);
46 aice_read_debug_reg(aice
, NDS_EDM_SR_EDMSW
, &edmsw
);
48 *reason
= (edmsw
>> 12) & 0x0F;
54 * Save processor state. This is called after a HALT instruction
55 * succeeds, and on other occasions the processor enters debug mode
56 * (breakpoint, watchpoint, etc).
/* Enter-debug-state hook (installed as nds32->enter_debug_state).
 * Runs after the core stops (halt, breakpoint, watchpoint, ...):
 * performs one-time architecture discovery, disables hardware
 * break/watchpoints while halted, detects a virtual-hosting syscall
 * hit, then saves the full register context and checks the interrupt
 * stack.
 * NOTE(review): this extraction appears to have dropped braces and
 * some local declarations (e.g. value_pc) as well as the returns;
 * verify against the original source file. */
58 static int nds32_v3_debug_entry(struct nds32
*nds32
, bool enable_watchpoint
)
60 LOG_DEBUG("nds32_v3_debug_entry");
/* Remember the previous target state so it can be rolled back below
 * if the debug reason cannot be determined. */
62 enum target_state backup_state
= nds32
->target
->state
;
63 nds32
->target
->state
= TARGET_HALTED
;
/* First stop since reset/attach: read config registers once. */
65 if (nds32
->init_arch_info_after_halted
== false) {
66 /* init architecture info according to config registers */
67 CHECK_RETVAL(nds32_config(nds32
));
69 nds32
->init_arch_info_after_halted
= true;
72 /* REVISIT entire cache should already be invalid !!! */
73 register_cache_invalidate(nds32
->core_cache
);
75 /* deactivate all hardware breakpoints */
76 CHECK_RETVAL(v3_common_callback
->deactivate_hardware_breakpoint(nds32
->target
));
78 if (enable_watchpoint
)
79 CHECK_RETVAL(v3_common_callback
->deactivate_hardware_watchpoint(nds32
->target
));
/* Virtual hosting: if we stopped on the soft breakpoint planted at
 * the syscall entry, remove it and flag the syscall hit so the
 * memory-access paths below switch to BUS mode. */
81 struct breakpoint
*syscall_break
= &(nds32
->syscall_break
);
82 if (nds32
->virtual_hosting
) {
83 if (syscall_break
->set
) {
84 /** disable virtual hosting */
86 /* remove breakpoint at syscall entry */
87 target_remove_breakpoint(nds32
->target
, syscall_break
);
88 syscall_break
->set
= 0;
91 nds32_get_mapped_reg(nds32
, PC
, &value_pc
);
92 if (value_pc
== syscall_break
->address
)
93 /** process syscall for virtual hosting */
94 nds32
->hit_syscall
= true;
/* Unknown debug reason: undo the state change and re-arm the
 * hardware break/watchpoints that were disabled above. */
98 if (ERROR_OK
!= nds32_examine_debug_reason(nds32
)) {
99 nds32
->target
->state
= backup_state
;
101 /* re-activate all hardware breakpoints & watchpoints */
102 CHECK_RETVAL(v3_common_callback
->activate_hardware_breakpoint(nds32
->target
));
104 if (enable_watchpoint
)
105 CHECK_RETVAL(v3_common_callback
->activate_hardware_watchpoint(nds32
->target
));
110 /* Save registers. */
111 nds32_full_context(nds32
);
113 /* check interrupt level */
114 v3_common_callback
->check_interrupt_stack(nds32
);
120 * Restore processor state.
/* Leave-debug-state hook (installed as nds32->leave_debug_state):
 * re-arms hardware break/watchpoints, restores the interrupt stack
 * and register context, and -- when virtual hosting is enabled --
 * either skips the just-hit syscall by executing IRETs in the DIM or
 * re-plants the soft breakpoint at the syscall entry.
 * NOTE(review): braces and several local declarations (value_ir3,
 * entry_size, value_ir6, syscall_id, value_ir9) appear to have been
 * dropped by this extraction; verify against the original file. */
122 static int nds32_v3_leave_debug_state(struct nds32
*nds32
, bool enable_watchpoint
)
124 LOG_DEBUG("nds32_v3_leave_debug_state");
126 struct target
*target
= nds32
->target
;
128 /* activate all hardware breakpoints */
129 CHECK_RETVAL(v3_common_callback
->activate_hardware_breakpoint(target
));
131 if (enable_watchpoint
) {
132 /* activate all watchpoints */
133 CHECK_RETVAL(v3_common_callback
->activate_hardware_watchpoint(target
));
136 /* restore interrupt stack */
137 v3_common_callback
->restore_interrupt_stack(nds32
);
139 /* REVISIT once we start caring about MMU and cache state,
140 * address it here ...
143 /* restore PSW, PC, and R0 ... after flushing any modified
146 CHECK_RETVAL(nds32_restore_context(target
));
148 if (nds32
->virtual_hosting
) {
149 /** enable virtual hosting */
152 uint32_t syscall_address
;
154 /* get syscall entry address */
155 nds32_get_mapped_reg(nds32
, IR3
, &value_ir3
);
/* IR3[15:14] selects an exception-entry size of 4/16/64/256 bytes
 * (0x4 << 2n); the SYSCALL vector is entry number 8 from the base
 * held in IR3's upper halfword. */
156 entry_size
= 0x4 << (((value_ir3
>> 14) & 0x3) << 1);
157 syscall_address
= (value_ir3
& 0xFFFF0000) + entry_size
* 8; /* The index of SYSCALL is 8 */
159 if (nds32
->hit_syscall
) {
160 /* single step to skip syscall entry */
161 /* use IRET to skip syscall */
162 struct aice_port_s
*aice
= target_to_aice(target
);
/* Identify the pending syscall from IR6. */
167 nds32_get_mapped_reg(nds32
, IR6
, &value_ir6
);
168 syscall_id
= (value_ir6
>> 16) & 0x7FFF;
170 if (syscall_id
== NDS32_SYSCALL_EXIT
) {
171 /* If target hits exit syscall, do not use IRET to skip handler. */
174 /* use api->read/write_reg to skip nds32 register cache */
175 uint32_t value_dimbr
;
176 aice_read_debug_reg(aice
, NDS_EDM_SR_DIMBR
, &value_dimbr
);
/* NOTE(review): presumably points IR11 at DIMBR+0xC so the first
 * IRET lands back inside the debug instruction memory -- confirm
 * against the EDM documentation. */
177 aice_write_register(aice
, IR11
, value_dimbr
+ 0xC);
179 aice_read_register(aice
, IR9
, &value_ir9
);
180 value_ir9
+= 4; /* syscall is always 4 bytes */
181 aice_write_register(aice
, IR9
, value_ir9
);
183 /* backup hardware breakpoint 0 */
184 uint32_t backup_bpa
, backup_bpam
, backup_bpc
;
185 aice_read_debug_reg(aice
, NDS_EDM_SR_BPA0
, &backup_bpa
);
186 aice_read_debug_reg(aice
, NDS_EDM_SR_BPAM0
, &backup_bpam
);
187 aice_read_debug_reg(aice
, NDS_EDM_SR_BPC0
, &backup_bpc
);
189 /* use hardware breakpoint 0 to stop cpu after skipping syscall */
190 aice_write_debug_reg(aice
, NDS_EDM_SR_BPA0
, value_ir9
);
191 aice_write_debug_reg(aice
, NDS_EDM_SR_BPAM0
, 0);
192 aice_write_debug_reg(aice
, NDS_EDM_SR_BPC0
, 0xA);
195 * First IRET is used to quit debug mode.
196 * Second IRET is used to quit current syscall. */
197 uint32_t dim_inst
[4] = {NOP
, NOP
, IRET
, IRET
};
198 aice_execute(aice
, dim_inst
, 4);
200 /* restore origin hardware breakpoint 0 */
201 aice_write_debug_reg(aice
, NDS_EDM_SR_BPA0
, backup_bpa
);
202 aice_write_debug_reg(aice
, NDS_EDM_SR_BPAM0
, backup_bpam
);
203 aice_write_debug_reg(aice
, NDS_EDM_SR_BPC0
, backup_bpc
);
/* Syscall fully serviced; clear the flag set in debug_entry. */
206 nds32
->hit_syscall
= false;
209 /* insert breakpoint at syscall entry */
210 struct breakpoint
*syscall_break
= &(nds32
->syscall_break
);
212 syscall_break
->address
= syscall_address
;
213 syscall_break
->type
= BKPT_SOFT
;
214 syscall_break
->set
= 1;
215 target_add_breakpoint(target
, syscall_break
);
/* get_watched_address hook: determine the data address that triggered
 * the watchpoint exception.  Reads the EDMSW match bits to find which
 * hardware breakpoint register(s) matched; for ambiguous or precise
 * matches it decodes the instruction at PC to derive the access range
 * and dispel false matches.  Writes 0xFFFFFFFF to *address when no
 * single address can be identified.
 * NOTE(review): several locals (edm_cfg, edmsw, match_bits, i,
 * bp_control, val_pc, opcode, watch_end) plus various braces and
 * branch bodies are missing from this extraction; verify against the
 * original source file. */
221 static int nds32_v3_get_exception_address(struct nds32
*nds32
,
222 uint32_t *address
, uint32_t reason
)
224 LOG_DEBUG("nds32_v3_get_exception_address");
226 struct aice_port_s
*aice
= target_to_aice(nds32
->target
);
227 struct target
*target
= nds32
->target
;
231 uint32_t match_count
;
/* Cached across calls: HW breakpoint count, read once from EDM_CFG. */
233 static int32_t number_of_hard_break
;
236 if (number_of_hard_break
== 0) {
237 aice_read_debug_reg(aice
, NDS_EDM_SR_EDM_CFG
, &edm_cfg
);
238 number_of_hard_break
= (edm_cfg
& 0x7) + 1;
241 aice_read_debug_reg(aice
, NDS_EDM_SR_EDMSW
, &edmsw
);
242 /* clear matching bits (write-one-clear) */
243 aice_write_debug_reg(aice
, NDS_EDM_SR_EDMSW
, edmsw
);
/* EDMSW[11:4]: one match bit per hardware breakpoint unit. */
244 match_bits
= (edmsw
>> 4) & 0xFF;
246 for (i
= 0 ; i
< number_of_hard_break
; i
++) {
247 if (match_bits
& (1 << i
)) {
248 aice_read_debug_reg(aice
, NDS_EDM_SR_BPA0
+ i
, address
);
251 /* If target hits multiple read/access watchpoint,
252 * select the first one. */
253 aice_read_debug_reg(aice
, NDS_EDM_SR_BPC0
+ i
, &bp_control
);
254 if (0x3 == (bp_control
& 0x3)) {
261 if (match_count
> 1) { /* multiple hits */
264 } else if (match_count
== 1) {
267 struct nds32_instruction instruction
;
268 struct watchpoint
*wp
;
271 nds32_get_mapped_reg(nds32
, PC
, &val_pc
);
273 if ((NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE
== reason
) ||
274 (NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE
== reason
)) {
275 if (edmsw
& 0x4) /* check EDMSW.IS_16BIT */
/* Decode the instruction at PC to learn its access range. */
281 nds32_read_opcode(nds32
, val_pc
, &opcode
);
282 nds32_evaluate_opcode(nds32
, opcode
, val_pc
, &instruction
);
284 LOG_DEBUG("PC: 0x%08" PRIx32
", access start: 0x%08" PRIx32
", end: 0x%08" PRIx32
,
285 val_pc
, instruction
.access_start
, instruction
.access_end
);
287 /* check if multiple hits in the access range */
288 uint32_t in_range_watch_count
= 0;
289 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
290 if ((instruction
.access_start
<= wp
->address
) &&
291 (wp
->address
< instruction
.access_end
))
292 in_range_watch_count
++;
294 if (in_range_watch_count
> 1) {
295 /* Hit LSMW instruction. */
300 /* dispel false match */
302 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
303 if (((*address
^ wp
->address
) & (~wp
->mask
)) == 0) {
304 uint32_t watch_start
;
307 watch_start
= wp
->address
;
308 watch_end
= wp
->address
+ wp
->length
;
/* Match is outside the decoded access range => false match. */
310 if ((watch_end
<= instruction
.access_start
) ||
311 (instruction
.access_end
<= watch_start
))
323 } else if (match_count
== 0) {
324 /* global stop is precise exception */
325 if ((NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP
== reason
) && nds32
->global_stop
) {
326 /* parse instruction to get correct access address */
329 struct nds32_instruction instruction
;
331 nds32_get_mapped_reg(nds32
, PC
, &val_pc
);
332 nds32_read_opcode(nds32
, val_pc
, &opcode
);
333 nds32_evaluate_opcode(nds32
, opcode
, val_pc
, &instruction
);
335 *address
= instruction
.access_start
;
/* Fallback: address could not be identified. */
341 *address
= 0xFFFFFFFF;
345 void nds32_v3_common_register_callback(struct nds32_v3_common_callback
*callback
)
347 v3_common_callback
= callback
;
350 /** target_type functions: */
351 /* target request support */
352 int nds32_v3_target_request_data(struct target
*target
,
353 uint32_t size
, uint8_t *buffer
)
355 /* AndesCore could use DTR register to communicate with OpenOCD
357 * Target data will be put in buffer
358 * The format of DTR is as follow
359 * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
360 * target_req_cmd has three possible values:
361 * TARGET_REQ_TRACEMSG
362 * TARGET_REQ_DEBUGMSG
363 * TARGET_REQ_DEBUGCHAR
364 * if size == 0, target will call target_asciimsg(),
365 * else call target_hexmsg()
367 LOG_WARNING("Not implemented: %s", __func__
);
372 int nds32_v3_checksum_memory(struct target
*target
,
373 uint32_t address
, uint32_t count
, uint32_t *checksum
)
375 LOG_WARNING("Not implemented: %s", __func__
);
381 * find out which watchpoint hits
382 * get exception address and compare the address to watchpoints
384 int nds32_v3_hit_watchpoint(struct target
*target
,
385 struct watchpoint
**hit_watchpoint
)
387 static struct watchpoint scan_all_watchpoint
;
389 uint32_t exception_address
;
390 struct watchpoint
*wp
;
391 struct nds32
*nds32
= target_to_nds32(target
);
393 exception_address
= nds32
->watched_address
;
395 if (exception_address
== 0xFFFFFFFF)
398 if (exception_address
== 0) {
399 scan_all_watchpoint
.address
= 0;
400 scan_all_watchpoint
.rw
= WPT_WRITE
;
401 scan_all_watchpoint
.next
= 0;
402 scan_all_watchpoint
.unique_id
= 0x5CA8;
404 *hit_watchpoint
= &scan_all_watchpoint
;
408 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
409 if (((exception_address
^ wp
->address
) & (~wp
->mask
)) == 0) {
410 *hit_watchpoint
= wp
;
419 int nds32_v3_target_create_common(struct target
*target
, struct nds32
*nds32
)
421 nds32
->register_map
= nds32_v3_register_mapping
;
422 nds32
->get_debug_reason
= nds32_v3_get_debug_reason
;
423 nds32
->enter_debug_state
= nds32_v3_debug_entry
;
424 nds32
->leave_debug_state
= nds32_v3_leave_debug_state
;
425 nds32
->get_watched_address
= nds32_v3_get_exception_address
;
427 /* Init target->arch_info in nds32_init_arch_info().
428 * After this, user could use target_to_nds32() to get nds32 object */
429 nds32_init_arch_info(target
, nds32
);
/* Not implemented: only logs a warning.
 * NOTE(review): several parameter lines (and the braces/return) of
 * this signature are missing from this extraction -- the original
 * presumably also takes the parameter counts, exit point, timeout and
 * arch_info like other run_algorithm implementations; confirm against
 * the full source file before editing. */
434 int nds32_v3_run_algorithm(struct target
*target
,
436 struct mem_param
*mem_params
,
438 struct reg_param
*reg_params
,
439 uint32_t entry_point
,
444 LOG_WARNING("Not implemented: %s", __func__
);
/* target read_buffer hook: CPU-channel access requires a halted core;
 * translates the virtual address (single page only -- see BUG note
 * below), temporarily switches to BUS access while servicing a
 * virtual-hosting syscall, and delegates to nds32_read_buffer().
 * NOTE(review): the declaration of 'result', the trailing
 * 'return result;' and the braces appear to be missing from this
 * extraction; verify against the original source file. */
449 int nds32_v3_read_buffer(struct target
*target
, uint32_t address
,
450 uint32_t size
, uint8_t *buffer
)
452 struct nds32
*nds32
= target_to_nds32(target
);
453 struct nds32_memory
*memory
= &(nds32
->memory
);
/* CPU-channel reads execute on the core, so it must be halted. */
455 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
456 (target
->state
!= TARGET_HALTED
)) {
457 LOG_WARNING("target was not halted");
458 return ERROR_TARGET_NOT_HALTED
;
461 uint32_t physical_address
;
462 /* BUG: If access range crosses multiple pages, the translation will not correct
463 * for second page or so. */
465 /* When DEX is set to one, hardware will enforce the following behavior without
466 * modifying the corresponding control bits in PSW.
468 * Disable all interrupts
469 * Become superuser mode
471 * Use MMU_CFG.DE as the data access endian
472 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
473 * Disable audio special features
474 * Disable inline function call
476 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
477 * to physical address.
479 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
480 address
= physical_address
;
485 struct aice_port_s
*aice
= target_to_aice(target
);
486 /* give arbitrary initial value to avoid warning messages */
487 enum nds_memory_access origin_access_channel
= NDS_MEMORY_ACC_CPU
;
489 if (nds32
->hit_syscall
) {
490 /* Use bus mode to access memory during virtual hosting */
491 origin_access_channel
= memory
->access_channel
;
492 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
493 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
496 result
= nds32_read_buffer(target
, address
, size
, buffer
);
498 if (nds32
->hit_syscall
) {
499 /* Restore access_channel after virtual hosting */
500 memory
->access_channel
= origin_access_channel
;
501 aice_memory_access(aice
, origin_access_channel
);
/* target write_buffer hook: CPU-channel access requires a halted
 * core; translates the virtual address (single page only -- see BUG
 * note below).  During a virtual-hosting syscall the write is routed
 * through nds32_gdb_fileio_write_memory() (forcing BUS mode when no
 * usable D-cache exists); otherwise it delegates to
 * nds32_write_buffer().
 * NOTE(review): the declaration of 'result', a 'return result;' in
 * the hit_syscall branch and the braces appear to be missing from
 * this extraction; verify against the original source file. */
507 int nds32_v3_write_buffer(struct target
*target
, uint32_t address
,
508 uint32_t size
, const uint8_t *buffer
)
510 struct nds32
*nds32
= target_to_nds32(target
);
511 struct nds32_memory
*memory
= &(nds32
->memory
);
/* CPU-channel writes execute on the core, so it must be halted. */
513 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
514 (target
->state
!= TARGET_HALTED
)) {
515 LOG_WARNING("target was not halted");
516 return ERROR_TARGET_NOT_HALTED
;
519 uint32_t physical_address
;
520 /* BUG: If access range crosses multiple pages, the translation will not correct
521 * for second page or so. */
523 /* When DEX is set to one, hardware will enforce the following behavior without
524 * modifying the corresponding control bits in PSW.
526 * Disable all interrupts
527 * Become superuser mode
529 * Use MMU_CFG.DE as the data access endian
530 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
531 * Disable audio special features
532 * Disable inline function call
534 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
535 * to physical address.
537 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
538 address
= physical_address
;
542 if (nds32
->hit_syscall
) {
543 struct aice_port_s
*aice
= target_to_aice(target
);
544 enum nds_memory_access origin_access_channel
;
545 origin_access_channel
= memory
->access_channel
;
547 /* If target has no cache, use BUS mode to access memory. */
548 if ((memory
->dcache
.line_size
== 0)
549 || (memory
->dcache
.enable
== false)) {
550 /* There is no Dcache or Dcache is disabled. */
551 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
552 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
/* Virtual-hosting path: service the write as a GDB fileio request. */
556 result
= nds32_gdb_fileio_write_memory(nds32
, address
, size
, buffer
);
558 if (NDS_MEMORY_ACC_CPU
== origin_access_channel
) {
559 memory
->access_channel
= NDS_MEMORY_ACC_CPU
;
560 aice_memory_access(aice
, NDS_MEMORY_ACC_CPU
);
/* Normal path: plain buffered write. */
566 return nds32_write_buffer(target
, address
, size
, buffer
);
/* target read_memory hook: same structure as nds32_v3_read_buffer()
 * but with an element count -- halted-core check for CPU channel,
 * single-page virtual-to-physical translation, temporary BUS mode
 * during a virtual-hosting syscall, then delegate to
 * nds32_read_memory().
 * NOTE(review): the declaration of 'result', the trailing
 * 'return result;' and the braces appear to be missing from this
 * extraction; verify against the original source file. */
569 int nds32_v3_read_memory(struct target
*target
, uint32_t address
,
570 uint32_t size
, uint32_t count
, uint8_t *buffer
)
572 struct nds32
*nds32
= target_to_nds32(target
);
573 struct nds32_memory
*memory
= &(nds32
->memory
);
/* CPU-channel reads execute on the core, so it must be halted. */
575 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
576 (target
->state
!= TARGET_HALTED
)) {
577 LOG_WARNING("target was not halted");
578 return ERROR_TARGET_NOT_HALTED
;
581 uint32_t physical_address
;
582 /* BUG: If access range crosses multiple pages, the translation will not correct
583 * for second page or so. */
585 /* When DEX is set to one, hardware will enforce the following behavior without
586 * modifying the corresponding control bits in PSW.
588 * Disable all interrupts
589 * Become superuser mode
591 * Use MMU_CFG.DE as the data access endian
592 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
593 * Disable audio special features
594 * Disable inline function call
596 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
597 * to physical address.
599 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
600 address
= physical_address
;
604 struct aice_port_s
*aice
= target_to_aice(target
);
605 /* give arbitrary initial value to avoid warning messages */
606 enum nds_memory_access origin_access_channel
= NDS_MEMORY_ACC_CPU
;
609 if (nds32
->hit_syscall
) {
610 /* Use bus mode to access memory during virtual hosting */
611 origin_access_channel
= memory
->access_channel
;
612 memory
->access_channel
= NDS_MEMORY_ACC_BUS
;
613 aice_memory_access(aice
, NDS_MEMORY_ACC_BUS
);
616 result
= nds32_read_memory(target
, address
, size
, count
, buffer
);
618 if (nds32
->hit_syscall
) {
619 /* Restore access_channel after virtual hosting */
620 memory
->access_channel
= origin_access_channel
;
621 aice_memory_access(aice
, origin_access_channel
);
/* target write_memory hook: halted-core check for the CPU channel,
 * single-page virtual-to-physical translation (see BUG note below),
 * then delegate to nds32_write_memory().  Unlike the buffer variants
 * there is no virtual-hosting special case here.
 * NOTE(review): braces appear to be missing from this extraction;
 * verify against the original source file. */
627 int nds32_v3_write_memory(struct target
*target
, uint32_t address
,
628 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
630 struct nds32
*nds32
= target_to_nds32(target
);
631 struct nds32_memory
*memory
= &(nds32
->memory
);
/* CPU-channel writes execute on the core, so it must be halted. */
633 if ((NDS_MEMORY_ACC_CPU
== memory
->access_channel
) &&
634 (target
->state
!= TARGET_HALTED
)) {
635 LOG_WARNING("target was not halted");
636 return ERROR_TARGET_NOT_HALTED
;
639 uint32_t physical_address
;
640 /* BUG: If access range crosses multiple pages, the translation will not correct
641 * for second page or so. */
643 /* When DEX is set to one, hardware will enforce the following behavior without
644 * modifying the corresponding control bits in PSW.
646 * Disable all interrupts
647 * Become superuser mode
649 * Use MMU_CFG.DE as the data access endian
650 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
651 * Disable audio special features
652 * Disable inline function call
654 * Because hardware will turn off IT/DT by default, it MUST translate virtual address
655 * to physical address.
657 if (ERROR_OK
== target
->type
->virt2phys(target
, address
, &physical_address
))
658 address
= physical_address
;
662 return nds32_write_memory(target
, address
, size
, count
, buffer
);
/* target init hook: host-side setup that needs no target
 * communication -- allocates the GDB fileio bookkeeping structure.
 * NOTE(review): this definition continues past the end of this
 * extraction (the remaining fileio_info field initialization and the
 * return are not visible).
 * NOTE(review): the malloc() result is dereferenced without a NULL
 * check on the next statement -- potential NULL dereference on OOM;
 * consider checking before editing around this. */
665 int nds32_v3_init_target(struct command_context
*cmd_ctx
,
666 struct target
*target
)
668 /* Initialize anything we can set up without talking to the target */
669 struct nds32
*nds32
= target_to_nds32(target
);
673 target
->fileio_info
= malloc(sizeof(struct gdb_fileio_info
));
674 target
->fileio_info
->identifier
= NULL
;