// SPDX-License-Identifier: GPL-2.0-or-later

/***************************************************************************
 *   Copyright (C) 2013 Andes Technology                                   *
 *   Hsiangkai Wang <hkwang@andestech.com>                                 *
 ***************************************************************************/

#include <helper/log.h>
#include <helper/binarybuffer.h>
#include "nds32.h"
#include "nds32_aice.h"
#include "nds32_tlb.h"
#include "nds32_disassembler.h"
struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
uint32_t nds32_edm_ops_num;

const char *nds32_debug_type_name[11] = {
	/* ... */
	"HARDWARE BREAKPOINT",
	"DATA ADDR WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT IMPRECISE",
	/* ... */
	"HARDWARE SINGLE STEP",
	"DATA ADDR WATCHPOINT NEXT PRECISE",
	"DATA VALUE WATCHPOINT NEXT PRECISE",
	"LOAD STORE GLOBAL STOP",
};

static const int nds32_lm_size_table[16] = {
	/* ... */
};

static const int nds32_line_size_table[6] = {
	/* ... */
};
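/* Register-cache accessors: reads come from the cache while the cached value
 * is still valid and otherwise go through the AICE adapter. Disabled
 * registers read back as NDS32_REGISTER_DISABLE; FPU/audio registers read as
 * a fake 0 when the corresponding extension is off, so touching them cannot
 * raise an exception on the target. */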
static int nds32_get_core_reg(struct reg *reg)
{
	int retval;
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (reg->valid) {
		uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
		LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
				reg_arch_info->num, reg->name, val);
		return ERROR_OK;
	}

	int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);

	if (reg_arch_info->enable == false) {
		buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
		retval = ERROR_FAIL;
	} else {
		uint32_t val = 0;
		if ((nds32->fpu_enable == false)
				&& (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {
			retval = ERROR_OK;
		} else if ((nds32->audio_enable == false)
				&& (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {
			retval = ERROR_OK;
		} else {
			retval = aice_read_register(aice, mapped_regnum, &val);
		}

		buf_set_u32(reg_arch_info->value, 0, 32, val);

		LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
				reg_arch_info->num, reg->name, val);
	}

	if (retval == ERROR_OK) {
		reg->valid = true;
		reg->dirty = false;
	}

	return retval;
}
static int nds32_get_core_reg_64(struct reg *reg)
{
	int retval;
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (reg_arch_info->enable == false) {
		buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
		retval = ERROR_FAIL;
	} else {
		uint64_t val = 0;
		if ((nds32->fpu_enable == false)
				&& ((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {
			retval = ERROR_OK;
		} else {
			retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
		}

		buf_set_u64(reg_arch_info->value, 0, 64, val);
	}

	if (retval == ERROR_OK) {
		reg->valid = true;
		reg->dirty = false;
	}

	return retval;
}
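/* Cache the fields of $ir0 (PSW) that later memory accesses depend on:
 * the data endianness and whether address translation is active. */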
static int nds32_update_psw(struct nds32 *nds32)
{
	uint32_t value_ir0;
	struct aice_port_s *aice = target_to_aice(nds32->target);

	nds32_get_mapped_reg(nds32, IR0, &value_ir0);

	/* Save data memory endian */
	if ((value_ir0 >> 5) & 0x1) {
		nds32->data_endian = TARGET_BIG_ENDIAN;
		aice_set_data_endian(aice, AICE_BIG_ENDIAN);
	} else {
		nds32->data_endian = TARGET_LITTLE_ENDIAN;
		aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
	}

	/* Save translation status */
	nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;

	return ERROR_OK;
}
static int nds32_update_mmu_info(struct nds32 *nds32)
{
	uint32_t value;

	/* Update MMU control status */
	nds32_get_mapped_reg(nds32, MR0, &value);
	nds32->mmu_config.default_min_page_size = value & 0x1;
	nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;

	return ERROR_OK;
}
static int nds32_update_cache_info(struct nds32 *nds32)
{
	uint32_t value;

	if (nds32_get_mapped_reg(nds32, MR8, &value) == ERROR_OK) {
		if (value & 0x1)
			nds32->memory.icache.enable = true;
		else
			nds32->memory.icache.enable = false;

		if ((value >> 1) & 0x1)
			nds32->memory.dcache.enable = true;
		else
			nds32->memory.dcache.enable = false;
	} else {
		nds32->memory.icache.enable = false;
		nds32->memory.dcache.enable = false;
	}

	return ERROR_OK;
}
static int nds32_update_lm_info(struct nds32 *nds32)
{
	struct nds32_memory *memory = &(nds32->memory);
	uint32_t value_mr6;
	uint32_t value_mr7;

	nds32_get_mapped_reg(nds32, MR6, &value_mr6);
	if (value_mr6 & 0x1)
		memory->ilm_enable = true;
	else
		memory->ilm_enable = false;

	if (memory->ilm_align_ver == 0) { /* 1MB aligned */
		memory->ilm_start = value_mr6 & 0xFFF00000;
		memory->ilm_end = memory->ilm_start + memory->ilm_size;
	} else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
		memory->ilm_start = value_mr6 & 0xFFFFFC00;
		memory->ilm_end = memory->ilm_start + memory->ilm_size;
	} else {
		memory->ilm_start = -1;
		memory->ilm_end = -1;
	}

	nds32_get_mapped_reg(nds32, MR7, &value_mr7);
	if (value_mr7 & 0x1)
		memory->dlm_enable = true;
	else
		memory->dlm_enable = false;

	if (memory->dlm_align_ver == 0) { /* 1MB aligned */
		memory->dlm_start = value_mr7 & 0xFFF00000;
		memory->dlm_end = memory->dlm_start + memory->dlm_size;
	} else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
		memory->dlm_start = value_mr7 & 0xFFFFFC00;
		memory->dlm_end = memory->dlm_start + memory->dlm_size;
	} else {
		memory->dlm_start = -1;
		memory->dlm_end = -1;
	}

	return ERROR_OK;
}
/**
 * If the FPU/audio extension is disabled, accessing FPU/audio registers
 * causes exceptions. So we need to check whether FPU/audio is enabled
 * whenever the target halts. If the extension is disabled and the user
 * accesses FPU/audio registers, OpenOCD returns a fake value of 0 instead
 * of accessing the registers through DIM.
 */
static int nds32_check_extension(struct nds32 *nds32)
{
	uint32_t value;

	nds32_get_mapped_reg(nds32, FUCPR, &value);
	if (value == NDS32_REGISTER_DISABLE) {
		nds32->fpu_enable = false;
		nds32->audio_enable = false;
		return ERROR_OK;
	}

	if (value & 0x1)
		nds32->fpu_enable = true;
	else
		nds32->fpu_enable = false;

	if (value & 0x80000000)
		nds32->audio_enable = true;
	else
		nds32->audio_enable = false;

	return ERROR_OK;
}
static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);

	/* ignore values that will generate exception */
	if (nds32_reg_exception(mapped_regnum, value))
		return ERROR_OK;

	LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
			reg_arch_info->num, reg->name, value);

	if ((nds32->fpu_enable == false) &&
		(nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {

		buf_set_u32(reg->value, 0, 32, 0);
	} else if ((nds32->audio_enable == false) &&
		(nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {

		buf_set_u32(reg->value, 0, 32, 0);
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
		aice_write_register(aice, mapped_regnum, val);

		/* After set value to registers, read the value from target
		 * to avoid W1C inconsistency. */
		aice_read_register(aice, mapped_regnum, &val);
		buf_set_u32(reg_arch_info->value, 0, 32, val);
	}

	reg->valid = true;
	reg->dirty = false;

	/* update registers to take effect right now */
	if (mapped_regnum == IR0) {
		nds32_update_psw(nds32);
	} else if (mapped_regnum == MR0) {
		nds32_update_mmu_info(nds32);
	} else if ((mapped_regnum == MR6) || (mapped_regnum == MR7)) {
		/* update lm information */
		nds32_update_lm_info(nds32);
	} else if (mapped_regnum == MR8) {
		nds32_update_cache_info(nds32);
	} else if (mapped_regnum == FUCPR) {
		/* update audio/fpu setting */
		nds32_check_extension(nds32);
	}

	return ERROR_OK;
}
static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
{
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	uint32_t low_part = buf_get_u32(buf, 0, 32);
	uint32_t high_part = buf_get_u32(buf, 32, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if ((nds32->fpu_enable == false) &&
		((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {

		buf_set_u32(reg->value, 0, 32, 0);
		buf_set_u32(reg->value, 32, 32, 0);
	} else {
		buf_set_u32(reg->value, 0, 32, low_part);
		buf_set_u32(reg->value, 32, 32, high_part);
	}

	reg->valid = true;
	reg->dirty = true;

	return ERROR_OK;
}
static const struct reg_arch_type nds32_reg_access_type = {
	.get = nds32_get_core_reg,
	.set = nds32_set_core_reg,
};

static const struct reg_arch_type nds32_reg_access_type_64 = {
	.get = nds32_get_core_reg_64,
	.set = nds32_set_core_reg_64,
};
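/* Build the OpenOCD register cache for all NDS32 registers. FD0-FD31 use the
 * 64-bit accessors and IEEE-double typing; everything else is exposed as a
 * 32-bit register with a GDB feature name matching its register group. */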
static struct reg_cache *nds32_build_reg_cache(struct target *target,
		struct nds32 *nds32)
{
	struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
	struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
	struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
	int i;

	if (!cache || !reg_list || !reg_arch_info) {
		free(cache);
		free(reg_list);
		free(reg_arch_info);
		return NULL;
	}

	cache->name = "Andes registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = 0;

	for (i = 0; i < TOTAL_REG_NUM; i++) {
		reg_arch_info[i].num = i;
		reg_arch_info[i].target = target;
		reg_arch_info[i].nds32 = nds32;
		reg_arch_info[i].enable = false;

		reg_list[i].name = nds32_reg_simple_name(i);
		reg_list[i].number = reg_arch_info[i].num;
		reg_list[i].size = nds32_reg_size(i);
		reg_list[i].arch_info = &reg_arch_info[i];

		reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);

		if (reg_arch_info[i].num >= FD0 && reg_arch_info[i].num <= FD31) {
			reg_list[i].value = reg_arch_info[i].value;
			reg_list[i].type = &nds32_reg_access_type_64;

			reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
			reg_list[i].reg_data_type->id = "ieee_double";
			reg_list[i].group = "float";
		} else {
			reg_list[i].value = reg_arch_info[i].value;
			reg_list[i].type = &nds32_reg_access_type;
			reg_list[i].group = "general";

			if ((reg_arch_info[i].num >= FS0) && (reg_arch_info[i].num <= FS31)) {
				reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
				reg_list[i].reg_data_type->id = "ieee_single";
				reg_list[i].group = "float";
			} else if ((reg_arch_info[i].num == FPCSR) ||
					(reg_arch_info[i].num == FPCFG)) {
				reg_list[i].group = "float";
			} else if ((reg_arch_info[i].num == R28) ||
					(reg_arch_info[i].num == R29) ||
					(reg_arch_info[i].num == R31)) {
				reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
				reg_list[i].reg_data_type->id = "data_ptr";
			} else if ((reg_arch_info[i].num == R30) ||
					(reg_arch_info[i].num == PC)) {
				reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
				reg_list[i].reg_data_type->id = "code_ptr";
			} else {
				reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
				reg_list[i].reg_data_type->id = "uint32";
			}
		}

		if (reg_arch_info[i].num >= R16 && reg_arch_info[i].num <= R25)
			reg_list[i].caller_save = true;
		else
			reg_list[i].caller_save = false;

		reg_list[i].feature = malloc(sizeof(struct reg_feature));

		if (reg_arch_info[i].num >= R0 && reg_arch_info[i].num <= IFC_LP)
			reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
		else if (reg_arch_info[i].num >= CR0 && reg_arch_info[i].num <= SECUR0)
			reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
		else if (reg_arch_info[i].num >= D0L24 && reg_arch_info[i].num <= CBE3)
			reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
		else if (reg_arch_info[i].num >= FPCSR && reg_arch_info[i].num <= FD31)
			reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";

		cache->num_regs++;
	}

	nds32->core_cache = cache;

	return cache;
}
static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
{
	struct reg_cache *cache;

	cache = nds32_build_reg_cache(target, nds32);
	if (!cache)
		return ERROR_FAIL;

	*register_get_last_cache_p(&target->reg_cache) = cache;

	return ERROR_OK;
}
static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
{
	struct reg *r;

	r = nds32->core_cache->reg_list + regnum;

	return r;
}
int nds32_full_context(struct nds32 *nds32)
{
	uint32_t value, value_ir0;

	/* save $pc & $psw */
	nds32_get_mapped_reg(nds32, PC, &value);
	nds32_get_mapped_reg(nds32, IR0, &value_ir0);

	nds32_update_psw(nds32);
	nds32_update_mmu_info(nds32);
	nds32_update_cache_info(nds32);
	nds32_update_lm_info(nds32);

	nds32_check_extension(nds32);

	return ERROR_OK;
}
/* get register value internally */
int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
{
	struct reg_cache *reg_cache = nds32->core_cache;
	struct reg *r;

	if (regnum > reg_cache->num_regs)
		return ERROR_FAIL;

	r = nds32_reg_current(nds32, regnum);

	if (r->type->get(r) != ERROR_OK)
		return ERROR_FAIL;

	*value = buf_get_u32(r->value, 0, 32);

	return ERROR_OK;
}
/** set register internally */
int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
{
	struct reg_cache *reg_cache = nds32->core_cache;
	struct reg *r;
	uint8_t set_value[4];

	if (regnum > reg_cache->num_regs)
		return ERROR_FAIL;

	r = nds32_reg_current(nds32, regnum);

	buf_set_u32(set_value, 0, 32, value);

	return r->type->set(r, set_value);
}
/** get general register list */
static int nds32_get_general_reg_list(struct nds32 *nds32,
		struct reg **reg_list[], int *reg_list_size)
{
	struct reg *reg_current;
	int i;
	int current_idx;

	/** freed in gdb_server.c */
	*reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
	current_idx = 0;

	for (i = R0; i < IFC_LP + 1; i++) {
		reg_current = nds32_reg_current(nds32, i);
		if (((struct nds32_reg *)reg_current->arch_info)->enable) {
			(*reg_list)[current_idx] = reg_current;
			current_idx++;
		}
	}
	*reg_list_size = current_idx;

	return ERROR_OK;
}
/** get all register list */
static int nds32_get_all_reg_list(struct nds32 *nds32,
		struct reg **reg_list[], int *reg_list_size)
{
	struct reg_cache *reg_cache = nds32->core_cache;
	struct reg *reg_current;
	unsigned int i;

	*reg_list_size = reg_cache->num_regs;

	/** freed in gdb_server.c */
	*reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));

	for (i = 0; i < reg_cache->num_regs; i++) {
		reg_current = nds32_reg_current(nds32, i);
		reg_current->exist = ((struct nds32_reg *)
				reg_current->arch_info)->enable;
		(*reg_list)[i] = reg_current;
	}

	return ERROR_OK;
}
/** get register list for gdb, by register class */
int nds32_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	struct nds32 *nds32 = target_to_nds32(target);

	switch (reg_class) {
	case REG_CLASS_ALL:
		return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
	case REG_CLASS_GENERAL:
		return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
	default:
		return ERROR_FAIL;
	}
}
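/* Pick the EDM memory mode (MEM/ILM/DLM) for an [address, address + length)
 * access and clamp *end_address so that a single access never straddles a
 * local-memory boundary. */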
static int nds32_select_memory_mode(struct target *target, uint32_t address,
		uint32_t length, uint32_t *end_address)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32_memory *memory = &(nds32->memory);
	struct nds32_edm *edm = &(nds32->edm);
	uint32_t dlm_start, dlm_end;
	uint32_t ilm_start, ilm_end;
	uint32_t address_end = address + length;

	/* init end_address */
	*end_address = address_end;

	if (memory->access_channel == NDS_MEMORY_ACC_CPU)
		return ERROR_OK;

	if (edm->access_control == false) {
		LOG_DEBUG("EDM does not support ACC_CTL");
		return ERROR_OK;
	}

	if (edm->direct_access_local_memory == false) {
		LOG_DEBUG("EDM does not support DALM");
		aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		return ERROR_OK;
	}

	if (memory->mode != NDS_MEMORY_SELECT_AUTO) {
		LOG_DEBUG("Memory mode is not AUTO");
		return ERROR_OK;
	}

	/* set default mode */
	aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);

	if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
		ilm_start = memory->ilm_start;
		ilm_end = memory->ilm_end;

		/* case 1, address < ilm_start */
		if (address < ilm_start) {
			if (ilm_start < address_end) {
				/* update end_address to split non-ILM from ILM */
				*end_address = ilm_start;
			}
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		} else if ((ilm_start <= address) && (address < ilm_end)) {
			/* case 2, ilm_start <= address < ilm_end */
			if (ilm_end < address_end) {
				/* update end_address to split non-ILM from ILM */
				*end_address = ilm_end;
			}
			aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
		} else { /* case 3, ilm_end <= address */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		}
	} else {
		LOG_DEBUG("ILM is not enabled");
	}

	if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
		dlm_start = memory->dlm_start;
		dlm_end = memory->dlm_end;

		/* case 1, address < dlm_start */
		if (address < dlm_start) {
			if (dlm_start < address_end) {
				/* update end_address to split non-DLM from DLM */
				*end_address = dlm_start;
			}
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		} else if ((dlm_start <= address) && (address < dlm_end)) {
			/* case 2, dlm_start <= address < dlm_end */
			if (dlm_end < address_end) {
				/* update end_address to split non-DLM from DLM */
				*end_address = dlm_end;
			}
			aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
		} else { /* case 3, dlm_end <= address */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		}
	} else {
		LOG_DEBUG("DLM is not enabled");
	}

	return ERROR_OK;
}
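/* Read an arbitrary byte range: unaligned head bytes first, then word-sized
 * chunks (split at ILM/DLM boundaries by nds32_select_memory_mode()), then
 * the remaining tail bytes. */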
int nds32_read_buffer(struct target *target, uint32_t address,
		uint32_t size, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
			address, size);

	int retval = ERROR_OK;
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t end_address;

	if (((address % 2) == 0) && (size == 2)) {
		nds32_select_memory_mode(target, address, 2, &end_address);
		return aice_read_mem_unit(aice, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		nds32_select_memory_mode(target, address, unaligned, &end_address);
		retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);
		uint32_t read_len;

		do {
			nds32_select_memory_mode(target, address, aligned, &end_address);

			read_len = end_address - address;

			if (read_len > 8)
				retval = aice_read_mem_bulk(aice, address, read_len, buffer);
			else
				retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);

			if (retval != ERROR_OK)
				return retval;

			buffer += read_len;
			address += read_len;
			size -= read_len;
			aligned -= read_len;

		} while (aligned != 0);
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if (size >= 2) {
		int aligned = size - (size % 2);
		nds32_select_memory_mode(target, address, aligned, &end_address);
		retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail reads of less than 4 bytes */
	if (size > 0) {
		nds32_select_memory_mode(target, address, size, &end_address);
		retval = aice_read_mem_unit(aice, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
int nds32_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct aice_port_s *aice = target_to_aice(target);

	return aice_read_mem_unit(aice, address, size, count, buffer);
}
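/* Physical-memory accesses bypass the MMU by temporarily switching the
 * access channel to the bus and restoring it afterwards. */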
int nds32_read_phys_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);
	enum nds_memory_access orig_channel;
	int result;

	/* switch to BUS access mode to skip MMU */
	orig_channel = memory->access_channel;
	memory->access_channel = NDS_MEMORY_ACC_BUS;
	aice_memory_access(aice, memory->access_channel);

	/* The input address is physical address. No need to do address translation. */
	result = aice_read_mem_unit(aice, address, size, count, buffer);

	/* restore to original access mode */
	memory->access_channel = orig_channel;
	aice_memory_access(aice, memory->access_channel);

	return result;
}
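/* Write an arbitrary byte range using the same head/word/tail split as
 * nds32_read_buffer(). */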
int nds32_write_buffer(struct target *target, uint32_t address,
		uint32_t size, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
			address, size);

	struct aice_port_s *aice = target_to_aice(target);
	int retval = ERROR_OK;
	uint32_t end_address;

	if (((address % 2) == 0) && (size == 2)) {
		nds32_select_memory_mode(target, address, 2, &end_address);
		return aice_write_mem_unit(aice, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		nds32_select_memory_mode(target, address, unaligned, &end_address);
		retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);
		uint32_t write_len;

		do {
			nds32_select_memory_mode(target, address, aligned, &end_address);

			write_len = end_address - address;

			if (write_len > 8)
				retval = aice_write_mem_bulk(aice, address, write_len, buffer);
			else
				retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);

			if (retval != ERROR_OK)
				return retval;

			buffer += write_len;
			address += write_len;
			size -= write_len;
			aligned -= write_len;

		} while (aligned != 0);
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		nds32_select_memory_mode(target, address, size, &end_address);
		retval = aice_write_mem_unit(aice, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
int nds32_write_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct aice_port_s *aice = target_to_aice(target);

	return aice_write_mem_unit(aice, address, size, count, buffer);
}
int nds32_write_phys_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);
	enum nds_memory_access orig_channel;
	int result;

	/* switch to BUS access mode to skip MMU */
	orig_channel = memory->access_channel;
	memory->access_channel = NDS_MEMORY_ACC_BUS;
	aice_memory_access(aice, memory->access_channel);

	/* The input address is physical address. No need to do address translation. */
	result = aice_write_mem_unit(aice, address, size, count, buffer);

	/* restore to original access mode */
	memory->access_channel = orig_channel;
	aice_memory_access(aice, memory->access_channel);

	return result;
}
int nds32_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);
	struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);

	if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
		*enabled = 1;
	else
		*enabled = 0;

	return ERROR_OK;
}
int nds32_arch_state(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);

	if (nds32->common_magic != NDS32_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-Andes target");
		return ERROR_FAIL;
	}

	uint32_t value_pc, value_psw;

	nds32_get_mapped_reg(nds32, PC, &value_pc);
	nds32_get_mapped_reg(nds32, IR0, &value_psw);

	LOG_USER("target halted due to %s\n"
			"psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
			debug_reason_name(target),
			value_psw,
			value_pc,
			nds32->virtual_hosting ? ", virtual hosting" : "");

	/* save pc value to pseudo register pc */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
	buf_set_u32(reg->value, 0, 32, value_pc);

	return ERROR_OK;
}
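/* Registers that every NDS32 core implements; the optional registers are
 * enabled later by nds32_init_option_registers(). */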
static void nds32_init_must_have_registers(struct nds32 *nds32)
{
	struct reg_cache *reg_cache = nds32->core_cache;

	/** MUST have general registers */
	((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;

	/** MUST have configuration system registers */
	((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;

	/** MUST have interrupt system registers */
	((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;

	/** MUST have MMU system registers */
	((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;

	/** MUST have EDM system registers */
	((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
	((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
}
static int nds32_init_memory_config(struct nds32 *nds32)
{
	uint32_t value_cr1; /* ICM_CFG */
	uint32_t value_cr2; /* DCM_CFG */
	struct nds32_memory *memory = &(nds32->memory);

	/* read $cr1 to init instruction memory information */
	nds32_get_mapped_reg(nds32, CR1, &value_cr1);
	memory->icache.set = value_cr1 & 0x7;
	memory->icache.way = (value_cr1 >> 3) & 0x7;
	memory->icache.line_size = (value_cr1 >> 6) & 0x7;
	memory->icache.lock_support = (value_cr1 >> 9) & 0x1;

	memory->ilm_base = (value_cr1 >> 10) & 0x7;
	memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;

	/* read $cr2 to init data memory information */
	nds32_get_mapped_reg(nds32, CR2, &value_cr2);
	memory->dcache.set = value_cr2 & 0x7;
	memory->dcache.way = (value_cr2 >> 3) & 0x7;
	memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
	memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;

	memory->dlm_base = (value_cr2 >> 10) & 0x7;
	memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;

	return ERROR_OK;
}
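/* Decode the static configuration registers $cr0 (CPU_VER), $cr3 (MMU_CFG)
 * and $cr4 (MSC_CFG) into the cached cpu/mmu/misc configuration. */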
static void nds32_init_config(struct nds32 *nds32)
{
	uint32_t value_cr0;
	uint32_t value_cr3;
	uint32_t value_cr4;
	struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
	struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
	struct nds32_misc_config *misc_config = &(nds32->misc_config);

	nds32_get_mapped_reg(nds32, CR0, &value_cr0);
	nds32_get_mapped_reg(nds32, CR3, &value_cr3);
	nds32_get_mapped_reg(nds32, CR4, &value_cr4);

	/* config cpu version */
	cpu_version->performance_extension = value_cr0 & 0x1;
	cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
	cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
	cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
	cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
	cpu_version->revision = (value_cr0 >> 16) & 0xFF;
	cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
	cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;

	mmu_config->memory_protection = value_cr3 & 0x3;
	mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
	mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
	if (mmu_config->fully_associative_tlb) {
		mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
	} else {
		mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
		mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
	}
	mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
	mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
	mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
	mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
	mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
	mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
	mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
	mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
	mmu_config->ntme = (value_cr3 >> 30) & 0x1;
	mmu_config->drde = (value_cr3 >> 31) & 0x1;

	misc_config->edm = value_cr4 & 0x1;
	misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
	misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
	misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
	misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
	misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
	misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
	misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
	misc_config->l2_cache = (value_cr4 >> 9) & 0x1;
	misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
	misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
	misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
	misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
	misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
	misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
	misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
	misc_config->ifc = (value_cr4 >> 19) & 0x1;
	misc_config->mcu = (value_cr4 >> 20) & 0x1;
	misc_config->shadow = (value_cr4 >> 21) & 0x7;
	misc_config->ex9 = (value_cr4 >> 24) & 0x1;

	nds32_init_memory_config(nds32);
}
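/* Enable the optional registers that this particular core actually
 * implements, based on the configuration decoded in nds32_init_config(). */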
static int nds32_init_option_registers(struct nds32 *nds32)
{
	struct reg_cache *reg_cache = nds32->core_cache;
	struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
	struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
	struct nds32_misc_config *misc_config = &(nds32->misc_config);
	struct nds32_memory *memory_config = &(nds32->memory);
	bool no_cr5;
	bool no_racr0;
	uint32_t value_mod;

	if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
			((cpu_version->revision & 0xFC) == 0)) {
		no_cr5 = true;
		no_racr0 = true;
	} else {
		no_cr5 = false;
		no_racr0 = false;
	}

	if (misc_config->reduce_register == false) {
		((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
	}

	if (misc_config->no_dx_register == false) {
		((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
	}

	if (misc_config->ex9)
		((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;

	if (no_cr5 == false)
		((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;

	if (cpu_version->cop_fpu_extension) {
		((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
	}

	if (mmu_config->memory_protection == 1) {
		/* Secure MPU has no IPC, IPSW, P_ITYPE */
		((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
		((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
	}

	if (nds32->privilege_level != 0)
		((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;

	if (misc_config->mcu == true)
		((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;

	if (misc_config->interruption_level == false) {
		((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;

		/* Secure MPU has no IPC, IPSW, P_ITYPE */
		if (mmu_config->memory_protection != 1)
			((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
	}

	if ((cpu_version->cpu_id_family == 0x9) ||
			(cpu_version->cpu_id_family == 0xA) ||
			(cpu_version->cpu_id_family == 0xC) ||
			(cpu_version->cpu_id_family == 0xD))
		((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;

	if (misc_config->shadow == 1) {
		((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
	}

	if (misc_config->ifc)
		((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;

	if (nds32->privilege_level != 0)
		((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;

	if (mmu_config->memory_protection == 1) {
		if (mmu_config->memory_protection_version == 24)
			((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;

		if (nds32->privilege_level == 0) {
			if ((mmu_config->memory_protection_version == 16) ||
					(mmu_config->memory_protection_version == 24)) {
				((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;

				if (misc_config->shadow == 1) {
					((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
					((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
					((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
				}
			}
		}
	} else if (mmu_config->memory_protection == 2) {
		((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;

		if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
				(cpu_version->cpu_id_family != 0xD))
			((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
	}

	if (mmu_config->memory_protection > 0) {
		((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
	}

	if (memory_config->ilm_base != 0)
		if (nds32->privilege_level == 0)
			((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;

	if (memory_config->dlm_base != 0)
		if (nds32->privilege_level == 0)
			((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;

	if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
		((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;

	if (misc_config->high_speed_memory_port)
		((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;

	((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;

	if (misc_config->edm) {
		int dr_reg_n = nds32->edm.breakpoint_num * 5;

		for (int i = 0 ; i < dr_reg_n ; i++)
			((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;

		((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
	}

	if (misc_config->debug_tracer) {
		((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
	}

	if (misc_config->performance_monitor) {
		((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
	}

	if (misc_config->local_memory_dma) {
		((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
	}

	if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
			(no_racr0 == false))
		((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;

	if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
		((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;

	if (misc_config->audio_isa != 0) {
		if (misc_config->audio_isa > 1) {
			((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
		}

		((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;

		uint32_t fucpr_backup;
		/* enable the audio extension and read the MOD configuration */
		nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
		if ((fucpr_backup & 0x80000000) == 0)
			nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
		nds32_get_mapped_reg(nds32, MOD, &value_mod);
		/* restore original FUCPR value */
		if ((fucpr_backup & 0x80000000) == 0)
			nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);

		if ((value_mod >> 6) & 0x1) {
			((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
		}
	}

	if ((cpu_version->cpu_id_family == 0x9) ||
			(cpu_version->cpu_id_family == 0xA) ||
			(cpu_version->cpu_id_family == 0xC)) {

		((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;

		if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
			((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
	}

	uint32_t ir3_value;
	uint32_t ivb_prog_pri_lvl;
	uint32_t ivb_ivic_ver;

	nds32_get_mapped_reg(nds32, IR3, &ir3_value);
	ivb_prog_pri_lvl = ir3_value & 0x1;
	ivb_ivic_ver = (ir3_value >> 11) & 0x3;

	if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
		((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
	}

	if (ivb_ivic_ver >= 1) {
		((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
	}

	return ERROR_OK;
}
int nds32_init_register_table(struct nds32 *nds32)
{
	nds32_init_must_have_registers(nds32);

	return ERROR_OK;
}
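/* Software breakpoints: replace the original instruction with BREAK, sync
 * the caches, and read back to verify the write took effect. */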
int nds32_add_software_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	uint32_t data;
	uint32_t check_data;
	uint32_t break_insn;

	/* check the breakpoint size */
	target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);

	/* back up the original instruction;
	 * instructions are big-endian */
	if (*(char *)&data & 0x80) { /* 16-bit instruction */
		breakpoint->length = 2;
		break_insn = NDS32_BREAK_16;
	} else { /* 32-bit instruction */
		breakpoint->length = 4;
		break_insn = NDS32_BREAK_32;
	}

	free(breakpoint->orig_instr);

	breakpoint->orig_instr = malloc(breakpoint->length);
	memcpy(breakpoint->orig_instr, &data, breakpoint->length);

	/* self-modified code */
	target->type->write_buffer(target, breakpoint->address, breakpoint->length,
			(const uint8_t *)&break_insn);
	/* write_back & invalidate dcache & invalidate icache */
	nds32_cache_sync(target, breakpoint->address, breakpoint->length);

	/* read back to check */
	target->type->read_buffer(target, breakpoint->address, breakpoint->length,
			(uint8_t *)&check_data);
	if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
		return ERROR_OK;

	return ERROR_FAIL;
}
int nds32_remove_software_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	uint32_t check_data;
	uint32_t break_insn;

	if (breakpoint->length == 2)
		break_insn = NDS32_BREAK_16;
	else if (breakpoint->length == 4)
		break_insn = NDS32_BREAK_32;
	else
		return ERROR_FAIL;

	target->type->read_buffer(target, breakpoint->address, breakpoint->length,
			(uint8_t *)&check_data);

	/* break instruction is modified */
	if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
		return ERROR_OK;

	/* self-modified code */
	target->type->write_buffer(target, breakpoint->address, breakpoint->length,
			breakpoint->orig_instr);

	/* write_back & invalidate dcache & invalidate icache */
	nds32_cache_sync(target, breakpoint->address, breakpoint->length);

	return ERROR_OK;
}
/**
 * Restore the processor context on an Andes target. The full processor
 * context is analyzed to see if any of the registers are dirty on this end,
 * but have a valid new value. If this is the case, the processor is changed
 * to the appropriate mode and the new register values are written out to
 * the processor. If there happens to be a dirty register with an invalid
 * value, an error will be logged.
 *
 * @param target Pointer to the Andes target to have its context restored
 * @return Error status if the target is not halted.
 */
int nds32_restore_context(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	struct reg_cache *reg_cache = nds32->core_cache;
	struct reg *reg;
	struct nds32_reg *reg_arch_info;
	unsigned int i;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* check if there are dirty registers */
	for (i = 0; i < reg_cache->num_regs; i++) {
		reg = &(reg_cache->reg_list[i]);
		if (reg->dirty == true) {
			if (reg->valid == true) {

				LOG_DEBUG("examining dirty reg: %s", reg->name);
				LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
						i, buf_get_u32(reg->value, 0, 32));

				reg_arch_info = reg->arch_info;
				if (reg_arch_info->num >= FD0 && reg_arch_info->num <= FD31) {
					uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
					aice_write_reg_64(aice, reg_arch_info->num, val);
				} else {
					uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
					aice_write_register(aice, reg_arch_info->num, val);
				}

				reg->valid = true;
				reg->dirty = false;
			}
		}
	}

	return ERROR_OK;
}
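/* Probe the Embedded Debug Module: version, number of hardware breakpoints,
 * and which optional debug features (ACC_CTL, DALM, MAX_STOP) are available. */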
int nds32_edm_config(struct nds32 *nds32)
{
	struct target *target = nds32->target;
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t edm_cfg;
	uint32_t edm_ctl;

	aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);

	nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
	LOG_INFO("EDM version 0x%04x", nds32->edm.version);

	nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;

	if ((nds32->edm.version & 0x1000) || (nds32->edm.version >= 0x60))
		nds32->edm.access_control = true;
	else
		nds32->edm.access_control = false;

	if ((edm_cfg >> 4) & 0x1)
		nds32->edm.direct_access_local_memory = true;
	else
		nds32->edm.direct_access_local_memory = false;

	if (nds32->edm.version <= 0x20)
		nds32->edm.direct_access_local_memory = false;

	aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
	if (edm_ctl & (0x1 << 29))
		nds32->edm.support_max_stop = true;
	else
		nds32->edm.support_max_stop = false;

	/* set passcode for secure MCU */
	nds32_login(nds32);

	return ERROR_OK;
}
int nds32_config(struct nds32 *nds32)
{
	nds32_init_config(nds32);

	/* init optional system registers according to config registers */
	nds32_init_option_registers(nds32);

	/* get max interrupt level */
	if (nds32->misc_config.interruption_level)
		nds32->max_interrupt_level = 2;
	else
		nds32->max_interrupt_level = 3;

	/* get ILM/DLM size from MR6/MR7 */
	uint32_t value_mr6, value_mr7;
	uint32_t size_index;
	nds32_get_mapped_reg(nds32, MR6, &value_mr6);
	size_index = (value_mr6 >> 1) & 0xF;
	nds32->memory.ilm_size = nds32_lm_size_table[size_index];

	nds32_get_mapped_reg(nds32, MR7, &value_mr7);
	size_index = (value_mr7 >> 1) & 0xF;
	nds32->memory.dlm_size = nds32_lm_size_table[size_index];

	return ERROR_OK;
}
int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
{
	target->arch_info = nds32;
	nds32->target = target;

	nds32->common_magic = NDS32_COMMON_MAGIC;
	nds32->init_arch_info_after_halted = false;
	nds32->auto_convert_hw_bp = true;
	nds32->global_stop = false;
	nds32->soft_reset_halt = false;
	nds32->edm_passcode = NULL;
	nds32->privilege_level = 0;
	nds32->boot_time = 1500;
	nds32->reset_halt_as_examine = false;
	nds32->keep_target_edm_ctl = false;
	nds32->word_access_mem = false;
	nds32->virtual_hosting = true;
	nds32->hit_syscall = false;
	nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
	nds32->virtual_hosting_errno = 0;
	nds32->virtual_hosting_ctrl_c = false;
	nds32->attached = false;

	nds32->syscall_break.asid = 0;
	nds32->syscall_break.length = 4;
	nds32->syscall_break.is_set = false;
	nds32->syscall_break.orig_instr = NULL;
	nds32->syscall_break.next = NULL;
	nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
	nds32->syscall_break.linked_brp = 0;

	nds32_reg_init();

	if (nds32_reg_cache_init(target, nds32) == ERROR_FAIL)
		return ERROR_FAIL;

	if (nds32_init_register_table(nds32) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
int nds32_virtual_to_physical(struct target *target, target_addr_t address, target_addr_t *physical)
{
	struct nds32 *nds32 = target_to_nds32(target);

	if (nds32->memory.address_translation == false) {
		*physical = address;
		return ERROR_OK;
	}

	if (nds32_probe_tlb(nds32, address, physical) == ERROR_OK)
		return ERROR_OK;

	if (nds32_walk_page_table(nds32, address, physical) == ERROR_OK)
		return ERROR_OK;

	return ERROR_FAIL;
}
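/* Write back and invalidate every D-cache line in the range, then invalidate
 * the matching I-cache lines (by physical address, since PSW.IT is off while
 * in the debug exception). */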
int nds32_cache_sync(struct target *target, target_addr_t address, uint32_t length)
{
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_cache *dcache = &(nds32->memory.dcache);
	struct nds32_cache *icache = &(nds32->memory.icache);
	uint32_t dcache_line_size = nds32_line_size_table[dcache->line_size];
	uint32_t icache_line_size = nds32_line_size_table[icache->line_size];
	uint32_t cur_address;
	uint32_t cur_line;
	uint32_t start_line, end_line;
	int result;

	if ((dcache->line_size != 0) && (dcache->enable == true)) {
		/* address / dcache_line_size */
		start_line = address >> (dcache->line_size + 2);
		/* (address + length - 1) / dcache_line_size */
		end_line = (address + length - 1) >> (dcache->line_size + 2);

		for (cur_address = address, cur_line = start_line;
				cur_line <= end_line;
				cur_address += dcache_line_size, cur_line++) {

			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
			if (result != ERROR_OK)
				return result;

			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
			if (result != ERROR_OK)
				return result;
		}
	}

	if ((icache->line_size != 0) && (icache->enable == true)) {
		/* address / icache_line_size */
		start_line = address >> (icache->line_size + 2);
		/* (address + length - 1) / icache_line_size */
		end_line = (address + length - 1) >> (icache->line_size + 2);

		for (cur_address = address, cur_line = start_line;
				cur_line <= end_line;
				cur_address += icache_line_size, cur_line++) {
			/* Because PSW.IT is turned off under debug exception, address MUST
			 * be physical address.  L1I_VA_INVALIDATE uses PSW.IT to decide
			 * address translation or not. */
			target_addr_t physical_addr;
			if (target->type->virt2phys(target, cur_address, &physical_addr) == ERROR_FAIL)
				return ERROR_FAIL;

			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
			if (result != ERROR_OK)
				return result;
		}
	}

	return ERROR_OK;
}
uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
{
	if (!current)
		nds32_set_mapped_reg(nds32, PC, address);
	else
		nds32_get_mapped_reg(nds32, PC, &address);

	return address;
}
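/* Single step: program IR14.DSSIM according to step_isr_enable, leave debug
 * state, let the AICE adapter step once, then re-enter debug state. A step
 * requested right after hitting a syscall breakpoint is skipped, because
 * leave_debug_state() already steps implicitly over the syscall. */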
int nds32_step(struct target *target, int current,
		target_addr_t address, int handle_breakpoints)
{
	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	struct nds32 *nds32 = target_to_nds32(target);

	address = nds32_nextpc(nds32, current, address);

	LOG_DEBUG("STEP PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");

	uint32_t ir14_value;
	nds32_get_mapped_reg(nds32, IR14, &ir14_value);
	if (nds32->step_isr_enable)
		ir14_value |= (0x1 << 31);
	else
		ir14_value &= ~(0x1 << 31);
	nds32_set_mapped_reg(nds32, IR14, ir14_value);

	/* check hit_syscall before leave_debug_state() because
	 * leave_debug_state() may clear hit_syscall flag */
	bool no_step = false;
	if (nds32->hit_syscall)
		/* step after hit_syscall should be ignored because
		 * leave_debug_state will step implicitly to skip the
		 * syscall */
		no_step = true;

	/********* TODO: maybe create another function to handle this part */
	CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));

	if (no_step == false) {
		struct aice_port_s *aice = target_to_aice(target);
		if (aice_step(aice) != ERROR_OK)
			return ERROR_FAIL;
	}

	CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
	/********* TODO: maybe create another function to handle this part */

	if (nds32->step_isr_enable) {
		nds32_get_mapped_reg(nds32, IR14, &ir14_value);
		ir14_value &= ~(0x1 << 31);
		nds32_set_mapped_reg(nds32, IR14, ir14_value);
	}

	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));

	return ERROR_OK;
}
static int nds32_step_without_watchpoint(struct nds32 *nds32)
{
	struct target *target = nds32->target;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/** set DSSIM */
	uint32_t ir14_value;
	nds32_get_mapped_reg(nds32, IR14, &ir14_value);
	if (nds32->step_isr_enable)
		ir14_value |= (0x1 << 31);
	else
		ir14_value &= ~(0x1 << 31);
	nds32_set_mapped_reg(nds32, IR14, ir14_value);

	/********* TODO: maybe create another function to handle this part */
	CHECK_RETVAL(nds32->leave_debug_state(nds32, false));

	struct aice_port_s *aice = target_to_aice(target);

	if (aice_step(aice) != ERROR_OK)
		return ERROR_FAIL;

	/* save state */
	CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
	/********* TODO: maybe create another function to handle this part */

	/* restore DSSIM */
	if (nds32->step_isr_enable) {
		nds32_get_mapped_reg(nds32, IR14, &ir14_value);
		ir14_value &= ~(0x1 << 31);
		nds32_set_mapped_reg(nds32, IR14, ir14_value);
	}

	return ERROR_OK;
}
int nds32_target_state(struct nds32 *nds32, enum target_state *state)
{
	struct aice_port_s *aice = target_to_aice(nds32->target);
	enum aice_target_state_s nds32_state;

	if (aice_state(aice, &nds32_state) != ERROR_OK)
		return ERROR_FAIL;

	switch (nds32_state) {
		case AICE_DISCONNECT:
			LOG_INFO("USB is disconnected");
			return ERROR_FAIL;
		case AICE_TARGET_DETACH:
			LOG_INFO("Target is disconnected");
			return ERROR_FAIL;
		case AICE_TARGET_UNKNOWN:
			*state = TARGET_UNKNOWN;
			break;
		case AICE_TARGET_RUNNING:
			*state = TARGET_RUNNING;
			break;
		case AICE_TARGET_HALTED:
			*state = TARGET_HALTED;
			break;
		case AICE_TARGET_RESET:
			*state = TARGET_RESET;
			break;
		case AICE_TARGET_DEBUG_RUNNING:
			*state = TARGET_DEBUG_RUNNING;
			break;
		default:
			return ERROR_FAIL;
	}

	return ERROR_OK;
}
int nds32_examine_debug_reason(struct nds32 *nds32)
{
	uint32_t reason;
	struct target *target = nds32->target;

	if (nds32->hit_syscall == true) {
		LOG_DEBUG("Hit syscall breakpoint");
		target->debug_reason = DBG_REASON_BREAKPOINT;
		return ERROR_OK;
	}

	nds32->get_debug_reason(nds32, &reason);

	LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);

	/* Examine debug reason */
	switch (reason) {
		case NDS32_DEBUG_BREAK:
		case NDS32_DEBUG_BREAK_16:
		case NDS32_DEBUG_INST_BREAK:
			{
				uint32_t value_pc;
				uint32_t opcode;
				struct nds32_instruction instruction;

				nds32_get_mapped_reg(nds32, PC, &value_pc);

				if (nds32_read_opcode(nds32, value_pc, &opcode) != ERROR_OK)
					return ERROR_FAIL;
				if (nds32_evaluate_opcode(nds32, opcode, value_pc,
						&instruction) != ERROR_OK)
					return ERROR_FAIL;

				/* hit 'break 0x7FFF' */
				if ((instruction.info.opc_6 == 0x32) &&
					(instruction.info.sub_opc == 0xA) &&
					(instruction.info.imm == 0x7FFF)) {
					target->debug_reason = DBG_REASON_EXIT;
				} else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			}
			break;
		case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
		case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
		case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
			{
				int result;

				result = nds32->get_watched_address(nds32,
						&(nds32->watched_address), reason);
				/* do single step(without watchpoints) to skip the "watched" instruction */
				nds32_step_without_watchpoint(nds32);

				/* before single_step, save exception address */
				if (result != ERROR_OK)
					return ERROR_FAIL;

				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			break;
		case NDS32_DEBUG_DEBUG_INTERRUPT:
			target->debug_reason = DBG_REASON_DBGRQ;
			break;
		case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
			target->debug_reason = DBG_REASON_SINGLESTEP;
			break;
		case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
		case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
		case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
			if (nds32->get_watched_address(nds32,
					&(nds32->watched_address), reason) != ERROR_OK)
				return ERROR_FAIL;

			target->debug_reason = DBG_REASON_WATCHPOINT;
			break;
		default:
			target->debug_reason = DBG_REASON_UNDEFINED;
			break;
	}

	return ERROR_OK;
}
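/* nds32_login() programs the EDM after reset: the optional passcode (for
 * secure MCUs) is split into chunks of at most 8 hex digits, each turned into
 * a "write_misc gen_port0 ..." command, and any user-configured EDM
 * operations are replayed through gen_port0/gen_port1. */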
int nds32_login(struct nds32 *nds32)
{
	struct target *target = nds32->target;
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t passcode_length;
	char command_sequence[129];
	char command_str[33];
	char code_str[9];
	uint32_t copy_length;
	uint32_t code;
	uint32_t i;

	LOG_DEBUG("nds32_login");

	if (nds32->edm_passcode) {
		/* convert EDM passcode to command sequences */
		passcode_length = strlen(nds32->edm_passcode);
		command_sequence[0] = '\0';
		for (i = 0; i < passcode_length; i += 8) {
			if (passcode_length - i < 8)
				copy_length = passcode_length - i;
			else
				copy_length = 8;

			strncpy(code_str, nds32->edm_passcode + i, copy_length);
			code_str[copy_length] = '\0';
			code = strtoul(code_str, NULL, 16);

			sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
			strcat(command_sequence, command_str);
		}

		if (aice_program_edm(aice, command_sequence) != ERROR_OK)
			return ERROR_FAIL;

		/* get current privilege level */
		uint32_t value_edmsw;
		aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
		nds32->privilege_level = (value_edmsw >> 16) & 0x3;
		LOG_INFO("Current privilege level: %d", nds32->privilege_level);
	}

	if (nds32_edm_ops_num > 0) {
		const char *reg_name;
		for (i = 0 ; i < nds32_edm_ops_num ; i++) {
			code = nds32_edm_ops[i].value;
			if (nds32_edm_ops[i].reg_no == 6)
				reg_name = "gen_port0";
			else if (nds32_edm_ops[i].reg_no == 7)
				reg_name = "gen_port1";
			else
				return ERROR_FAIL;

			sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
			if (aice_program_edm(aice, command_str) != ERROR_OK)
				return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
int nds32_halt(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	enum target_state state;

	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	}

	if (nds32_target_state(nds32, &state) != ERROR_OK)
		return ERROR_FAIL;

	if (state != TARGET_HALTED)
		/* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
		if (aice_halt(aice) != ERROR_OK)
			return ERROR_FAIL;

	CHECK_RETVAL(nds32->enter_debug_state(nds32, true));

	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));

	return ERROR_OK;
}
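/* nds32_poll() queries the adapter for the core state and mirrors it into
 * target->state: it enters debug state on a fresh halt (or lets the core
 * free-run again on a false hit) and warns when the core was reset while
 * running. */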
/* poll current target status */
int nds32_poll(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);
	enum target_state state;

	if (nds32_target_state(nds32, &state) != ERROR_OK)
		return ERROR_FAIL;

	if (state == TARGET_HALTED) {
		if (target->state != TARGET_HALTED) {
			/* if false_hit, continue free_run */
			if (nds32->enter_debug_state(nds32, true) != ERROR_OK) {
				struct aice_port_s *aice = target_to_aice(target);
				aice_run(aice);
				return ERROR_OK;
			}

			LOG_DEBUG("Change target state to TARGET_HALTED.");

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
	} else if (state == TARGET_RESET) {
		if (target->state == TARGET_HALTED) {
			/* similar to assert srst */
			register_cache_invalidate(nds32->core_cache);
			target->state = TARGET_RESET;

			/* TODO: deassert srst */
		} else if (target->state == TARGET_RUNNING) {
			/* reset as running */
			LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
		}
	} else {
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			LOG_DEBUG("Change target state to TARGET_RUNNING.");
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}

	return ERROR_OK;
}
int nds32_resume(struct target *target, int current,
		target_addr_t address, int handle_breakpoints, int debug_execution)
{
	LOG_DEBUG("current %d address %08" TARGET_PRIxADDR
			" handle_breakpoints %d"
			" debug_execution %d",
			current, address, handle_breakpoints, debug_execution);

	struct nds32 *nds32 = target_to_nds32(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	address = nds32_nextpc(nds32, current, address);

	LOG_DEBUG("RESUME PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* Disable HSS to avoid users misuse HSS */
	if (nds32_reach_max_interrupt_level(nds32) == false) {
		uint32_t value_ir0;
		nds32_get_mapped_reg(nds32, IR0, &value_ir0);
		value_ir0 &= ~(0x1 << 11);
		nds32_set_mapped_reg(nds32, IR0, value_ir0);
	}

	CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));

	if (nds32->virtual_hosting_ctrl_c == false) {
		struct aice_port_s *aice = target_to_aice(target);
		aice_run(aice);
	} else
		nds32->virtual_hosting_ctrl_c = false;

	target->debug_reason = DBG_REASON_NOTHALTED;
	if (!debug_execution)
		target->state = TARGET_RUNNING;
	else
		target->state = TARGET_DEBUG_RUNNING;

	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	return ERROR_OK;
}
static int nds32_soft_reset_halt(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);

	aice_assert_srst(aice, AICE_SRST);

	/* halt core and set pc to 0x0 */
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	/* start fetching from IVB */
	uint32_t value_ir3;
	nds32_get_mapped_reg(nds32, IR3, &value_ir3);
	nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);

	return ERROR_OK;
}
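/* Editor's note (assumption): for reset-and-halt, the software reset-halt
 * sequence is used when explicitly requested or when the EDM version (or one
 * specific CPU revision) presumably cannot handle a hardware reset-hold
 * reliably; otherwise the AICE reset-hold/srst lines are asserted directly. */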
int nds32_assert_reset(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);

	/* TODO: apply hw reset signal in not examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	if (target->reset_halt) {
		if ((nds32->soft_reset_halt)
			|| (nds32->edm.version < 0x51)
			|| ((nds32->edm.version == 0x51)
				&& (cpu_version->revision == 0x1C)
				&& (cpu_version->cpu_id_family == 0xC)
				&& (cpu_version->cpu_id_version == 0x0)))
			nds32_soft_reset_halt(target);
		else
			aice_assert_srst(aice, AICE_RESET_HOLD);
	} else {
		aice_assert_srst(aice, AICE_SRST);
		alive_sleep(nds32->boot_time);
	}

	/* set passcode for secure MCU after core reset */
	nds32_login(nds32);

	/* registers are now invalid */
	register_cache_invalidate(nds32->core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
static int nds32_gdb_attach(struct nds32 *nds32)
{
	LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);

	if (nds32->attached == false) {

		if (nds32->keep_target_edm_ctl) {
			/* backup target EDM_CTL */
			struct aice_port_s *aice = target_to_aice(nds32->target);
			aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
		}

		target_halt(nds32->target);

		nds32->attached = true;
	}

	return ERROR_OK;
}
static int nds32_gdb_detach(struct nds32 *nds32)
{
	LOG_DEBUG("nds32_gdb_detach");
	bool backup_virtual_hosting_setting;

	if (nds32->attached) {

		backup_virtual_hosting_setting = nds32->virtual_hosting;
		/* turn off virtual hosting before resume as gdb-detach */
		nds32->virtual_hosting = false;
		target_resume(nds32->target, 1, 0, 0, 0);
		nds32->virtual_hosting = backup_virtual_hosting_setting;

		if (nds32->keep_target_edm_ctl) {
			/* restore target EDM_CTL */
			struct aice_port_s *aice = target_to_aice(nds32->target);
			aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
		}

		nds32->attached = false;
	}

	return ERROR_OK;
}
static int nds32_callback_event_handler(struct target *target,
		enum target_event event, void *priv)
{
	int retval = ERROR_OK;
	int target_number = *(int *)priv;

	if (target_number != target->target_number)
		return ERROR_OK;

	struct nds32 *nds32 = target_to_nds32(target);

	switch (event) {
		case TARGET_EVENT_GDB_ATTACH:
			retval = nds32_gdb_attach(nds32);
			break;
		case TARGET_EVENT_GDB_DETACH:
			retval = nds32_gdb_detach(nds32);
			break;
		default:
			break;
	}

	return retval;
}
int nds32_init(struct nds32 *nds32)
{
	/* Initialize anything we can set up without talking to the target */
	nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;

	/* register event callback */
	target_register_event_callback(nds32_callback_event_handler,
			&(nds32->target->target_number));

	return ERROR_OK;
}
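/* In nds32_get_gdb_fileio_info() below, the pending syscall ID is extracted
 * from bits [30:16] of IR6 and mapped to a GDB file-I/O request name; string
 * arguments are read back from target memory only so that their lengths can
 * be reported to GDB. */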
int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	/* fill syscall parameters to file-I/O info */
	if (!fileio_info) {
		LOG_ERROR("Target has not initial file-I/O data structure");
		return ERROR_FAIL;
	}

	struct nds32 *nds32 = target_to_nds32(target);
	uint32_t value_ir6;
	uint32_t syscall_id;

	if (nds32->hit_syscall == false)
		return ERROR_FAIL;

	nds32_get_mapped_reg(nds32, IR6, &value_ir6);
	syscall_id = (value_ir6 >> 16) & 0x7FFF;
	nds32->active_syscall_id = syscall_id;

	LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);

	/* free previous identifier storage */
	free(fileio_info->identifier);
	fileio_info->identifier = NULL;

	uint32_t reg_r0, reg_r1, reg_r2;
	nds32_get_mapped_reg(nds32, R0, &reg_r0);
	nds32_get_mapped_reg(nds32, R1, &reg_r1);
	nds32_get_mapped_reg(nds32, R2, &reg_r2);

	switch (syscall_id) {
		case NDS32_SYSCALL_EXIT:
			fileio_info->identifier = malloc(5);
			sprintf(fileio_info->identifier, "exit");
			fileio_info->param_1 = reg_r0;
			break;
		case NDS32_SYSCALL_OPEN:
			{
				uint8_t filename[256];
				fileio_info->identifier = malloc(5);
				sprintf(fileio_info->identifier, "open");
				fileio_info->param_1 = reg_r0;
				/* reserve fileio_info->param_2 for length of path */
				fileio_info->param_3 = reg_r1;
				fileio_info->param_4 = reg_r2;

				target->type->read_buffer(target, reg_r0, 256, filename);
				fileio_info->param_2 = strlen((char *)filename);
			}
			break;
		case NDS32_SYSCALL_CLOSE:
			fileio_info->identifier = malloc(6);
			sprintf(fileio_info->identifier, "close");
			fileio_info->param_1 = reg_r0;
			break;
		case NDS32_SYSCALL_READ:
			fileio_info->identifier = malloc(5);
			sprintf(fileio_info->identifier, "read");
			fileio_info->param_1 = reg_r0;
			fileio_info->param_2 = reg_r1;
			fileio_info->param_3 = reg_r2;
			break;
		case NDS32_SYSCALL_WRITE:
			fileio_info->identifier = malloc(6);
			sprintf(fileio_info->identifier, "write");
			fileio_info->param_1 = reg_r0;
			fileio_info->param_2 = reg_r1;
			fileio_info->param_3 = reg_r2;
			break;
		case NDS32_SYSCALL_LSEEK:
			fileio_info->identifier = malloc(6);
			sprintf(fileio_info->identifier, "lseek");
			fileio_info->param_1 = reg_r0;
			fileio_info->param_2 = reg_r1;
			fileio_info->param_3 = reg_r2;
			break;
		case NDS32_SYSCALL_UNLINK:
			{
				uint8_t filename[256];
				fileio_info->identifier = malloc(7);
				sprintf(fileio_info->identifier, "unlink");
				fileio_info->param_1 = reg_r0;
				/* reserve fileio_info->param_2 for length of path */

				target->type->read_buffer(target, reg_r0, 256, filename);
				fileio_info->param_2 = strlen((char *)filename);
			}
			break;
		case NDS32_SYSCALL_RENAME:
			{
				uint8_t filename[256];
				fileio_info->identifier = malloc(7);
				sprintf(fileio_info->identifier, "rename");
				fileio_info->param_1 = reg_r0;
				/* reserve fileio_info->param_2 for length of old path */
				fileio_info->param_3 = reg_r1;
				/* reserve fileio_info->param_4 for length of new path */

				target->type->read_buffer(target, reg_r0, 256, filename);
				fileio_info->param_2 = strlen((char *)filename);

				target->type->read_buffer(target, reg_r1, 256, filename);
				fileio_info->param_4 = strlen((char *)filename);
			}
			break;
		case NDS32_SYSCALL_FSTAT:
			fileio_info->identifier = malloc(6);
			sprintf(fileio_info->identifier, "fstat");
			fileio_info->param_1 = reg_r0;
			fileio_info->param_2 = reg_r1;
			break;
		case NDS32_SYSCALL_STAT:
			{
				uint8_t filename[256];
				fileio_info->identifier = malloc(5);
				sprintf(fileio_info->identifier, "stat");
				fileio_info->param_1 = reg_r0;
				/* reserve fileio_info->param_2 for length of old path */
				fileio_info->param_3 = reg_r1;

				target->type->read_buffer(target, reg_r0, 256, filename);
				fileio_info->param_2 = strlen((char *)filename) + 1;
			}
			break;
		case NDS32_SYSCALL_GETTIMEOFDAY:
			fileio_info->identifier = malloc(13);
			sprintf(fileio_info->identifier, "gettimeofday");
			fileio_info->param_1 = reg_r0;
			fileio_info->param_2 = reg_r1;
			break;
		case NDS32_SYSCALL_ISATTY:
			fileio_info->identifier = malloc(7);
			sprintf(fileio_info->identifier, "isatty");
			fileio_info->param_1 = reg_r0;
			break;
		case NDS32_SYSCALL_SYSTEM:
			{
				uint8_t command[256];
				fileio_info->identifier = malloc(7);
				sprintf(fileio_info->identifier, "system");
				fileio_info->param_1 = reg_r0;
				/* reserve fileio_info->param_2 for length of old path */

				target->type->read_buffer(target, reg_r0, 256, command);
				fileio_info->param_2 = strlen((char *)command);
			}
			break;
		case NDS32_SYSCALL_ERRNO:
			fileio_info->identifier = malloc(6);
			sprintf(fileio_info->identifier, "errno");
			nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
			break;
		default:
			fileio_info->identifier = malloc(8);
			sprintf(fileio_info->identifier, "unknown");
			break;
	}

	return ERROR_OK;
}
int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
			retcode, fileio_errno, ctrl_c ? "true" : "false");

	struct nds32 *nds32 = target_to_nds32(target);

	nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);

	nds32->virtual_hosting_errno = fileio_errno;
	nds32->virtual_hosting_ctrl_c = ctrl_c;
	nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;

	return ERROR_OK;
}
int nds32_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	/* sample $PC every 10 milliseconds */
	uint32_t iteration = seconds * 100;
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32 *nds32 = target_to_nds32(target);

	/* REVISIT: can nds32 profile without halting? */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (max_num_samples < iteration)
		iteration = max_num_samples;

	int pc_regnum = nds32->register_map(nds32, PC);
	aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);

	register_cache_invalidate(nds32->core_cache);

	return ERROR_OK;
}
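/* GDB's file-I/O protocol transfers 'struct stat' and 'struct timeval' in its
 * own fixed, big-endian layout; the byte shuffling in
 * nds32_gdb_fileio_write_memory() below repacks those fields into the
 * target's layout before writing to target memory, zero-filling the bytes
 * that have no counterpart in the GDB record. */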
int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
		uint32_t size, const uint8_t *buffer)
{
	if ((nds32->active_syscall_id == NDS32_SYSCALL_FSTAT) ||
			(nds32->active_syscall_id == NDS32_SYSCALL_STAT)) {
		/* If doing GDB file-I/O, target should convert 'struct stat'
		 * from gdb-format to target-format */
		uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];

		stat_buffer[0] = buffer[3];
		stat_buffer[1] = buffer[2];

		stat_buffer[2] = buffer[7];
		stat_buffer[3] = buffer[6];

		stat_buffer[4] = buffer[11];
		stat_buffer[5] = buffer[10];
		stat_buffer[6] = buffer[9];
		stat_buffer[7] = buffer[8];

		stat_buffer[8] = buffer[15];
		stat_buffer[9] = buffer[16];

		stat_buffer[10] = buffer[19];
		stat_buffer[11] = buffer[18];

		stat_buffer[12] = buffer[23];
		stat_buffer[13] = buffer[22];

		stat_buffer[14] = buffer[27];
		stat_buffer[15] = buffer[26];

		stat_buffer[16] = buffer[35];
		stat_buffer[17] = buffer[34];
		stat_buffer[18] = buffer[33];
		stat_buffer[19] = buffer[32];

		stat_buffer[20] = buffer[55];
		stat_buffer[21] = buffer[54];
		stat_buffer[22] = buffer[53];
		stat_buffer[23] = buffer[52];

		stat_buffer[24] = 0;
		stat_buffer[25] = 0;
		stat_buffer[26] = 0;
		stat_buffer[27] = 0;

		stat_buffer[28] = buffer[59];
		stat_buffer[29] = buffer[58];
		stat_buffer[30] = buffer[57];
		stat_buffer[31] = buffer[56];

		stat_buffer[32] = 0;
		stat_buffer[33] = 0;
		stat_buffer[34] = 0;
		stat_buffer[35] = 0;

		stat_buffer[36] = buffer[63];
		stat_buffer[37] = buffer[62];
		stat_buffer[38] = buffer[61];
		stat_buffer[39] = buffer[60];

		stat_buffer[40] = 0;
		stat_buffer[41] = 0;
		stat_buffer[42] = 0;
		stat_buffer[43] = 0;

		stat_buffer[44] = buffer[43];
		stat_buffer[45] = buffer[42];
		stat_buffer[46] = buffer[41];
		stat_buffer[47] = buffer[40];

		stat_buffer[48] = buffer[51];
		stat_buffer[49] = buffer[50];
		stat_buffer[50] = buffer[49];
		stat_buffer[51] = buffer[48];

		stat_buffer[52] = 0;
		stat_buffer[53] = 0;
		stat_buffer[54] = 0;
		stat_buffer[55] = 0;
		stat_buffer[56] = 0;
		stat_buffer[57] = 0;
		stat_buffer[58] = 0;
		stat_buffer[59] = 0;

		return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
	} else if (nds32->active_syscall_id == NDS32_SYSCALL_GETTIMEOFDAY) {
		/* If doing GDB file-I/O, target should convert 'struct timeval'
		 * from gdb-format to target-format */
		uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
		timeval_buffer[0] = buffer[3];
		timeval_buffer[1] = buffer[2];
		timeval_buffer[2] = buffer[1];
		timeval_buffer[3] = buffer[0];
		timeval_buffer[4] = buffer[11];
		timeval_buffer[5] = buffer[10];
		timeval_buffer[6] = buffer[9];
		timeval_buffer[7] = buffer[8];

		return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
	}

	return nds32_write_buffer(nds32->target, address, size, buffer);
}
int nds32_reset_halt(struct nds32 *nds32)
{
	LOG_INFO("reset halt as init");

	struct aice_port_s *aice = target_to_aice(nds32->target);
	aice_assert_srst(aice, AICE_RESET_HOLD);