nds32: remove .soft_reset_halt dependency
src/target/nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
25 #include <helper/log.h>
26 #include <helper/binarybuffer.h>
27 #include "nds32.h"
28 #include "nds32_aice.h"
29 #include "nds32_tlb.h"
30 #include "nds32_disassembler.h"
32 const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
33 const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */
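/*
 * Note on the encodings above (illustrative, assuming a little-endian
 * debug host): nds32 instructions are big-endian, but these constants
 * are written to the target as a little-endian byte buffer in
 * nds32_add_software_breakpoint(), so the 16-bit BREAK 0xEA00 is kept
 * here as 0x00EA and the 32-bit BREAK 0x6400000A as 0x0A000064.
 */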
35 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
36 uint32_t nds32_edm_ops_num;
38 const char *nds32_debug_type_name[11] = {
39 "SOFTWARE BREAK",
40 "SOFTWARE BREAK_16",
41 "HARDWARE BREAKPOINT",
42 "DATA ADDR WATCHPOINT PRECISE",
43 "DATA VALUE WATCHPOINT PRECISE",
44 "DATA VALUE WATCHPOINT IMPRECISE",
45 "DEBUG INTERRUPT",
46 "HARDWARE SINGLE STEP",
47 "DATA ADDR WATCHPOINT NEXT PRECISE",
48 "DATA VALUE WATCHPOINT NEXT PRECISE",
49 "LOAD STORE GLOBAL STOP",
52 static const int NDS32_LM_SIZE_TABLE[16] = {
53 4 * 1024,
54 8 * 1024,
55 16 * 1024,
56 32 * 1024,
57 64 * 1024,
58 128 * 1024,
59 256 * 1024,
60 512 * 1024,
61 1024 * 1024,
62 1 * 1024,
63 2 * 1024,
66 static const int NDS32_LINE_SIZE_TABLE[6] = {
67 0,
68 8,
69 16,
70 32,
71 64,
72 128,
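/*
 * Illustrative note: these tables decode size fields taken from system
 * registers elsewhere in this file.  nds32_config() uses bits [4:1] of
 * $mr6/$mr7 as an index into NDS32_LM_SIZE_TABLE to obtain the ILM/DLM
 * size in bytes, and nds32_cache_sync() relies on a cache line_size
 * field of n (n >= 1) meaning a (1 << (n + 2))-byte line, matching
 * NDS32_LINE_SIZE_TABLE.
 */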
75 static int nds32_get_core_reg(struct reg *reg)
77 int retval;
78 struct nds32_reg *reg_arch_info = reg->arch_info;
79 struct target *target = reg_arch_info->target;
80 struct nds32 *nds32 = target_to_nds32(target);
81 struct aice_port_s *aice = target_to_aice(target);
83 if (target->state != TARGET_HALTED) {
84 LOG_ERROR("Target not halted");
85 return ERROR_TARGET_NOT_HALTED;
88 if (reg->valid) {
89 LOG_DEBUG("reading register(cached) %i(%s), value: 0x%8.8" PRIx32,
90 reg_arch_info->num, reg->name, reg_arch_info->value);
91 return ERROR_OK;
94 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
96 if (reg_arch_info->enable == false) {
97 reg_arch_info->value = NDS32_REGISTER_DISABLE;
98 retval = ERROR_FAIL;
99 } else {
100 if ((nds32->fpu_enable == false) &&
101 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
102 reg_arch_info->value = 0;
103 retval = ERROR_OK;
104 } else if ((nds32->audio_enable == false) &&
105 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
106 reg_arch_info->value = 0;
107 retval = ERROR_OK;
108 } else {
109 retval = aice_read_register(aice,
110 mapped_regnum, &(reg_arch_info->value));
113 LOG_DEBUG("reading register %i(%s), value: 0x%8.8" PRIx32,
114 reg_arch_info->num, reg->name, reg_arch_info->value);
117 if (retval == ERROR_OK) {
118 reg->valid = true;
119 reg->dirty = false;
122 return retval;
125 static int nds32_get_core_reg_64(struct reg *reg)
127 int retval;
128 struct nds32_reg *reg_arch_info = reg->arch_info;
129 struct target *target = reg_arch_info->target;
130 struct nds32 *nds32 = target_to_nds32(target);
131 struct aice_port_s *aice = target_to_aice(target);
133 if (target->state != TARGET_HALTED) {
134 LOG_ERROR("Target not halted");
135 return ERROR_TARGET_NOT_HALTED;
138 if (reg->valid)
139 return ERROR_OK;
141 if (reg_arch_info->enable == false) {
142 reg_arch_info->value_64 = NDS32_REGISTER_DISABLE;
143 retval = ERROR_FAIL;
144 } else {
145 if ((nds32->fpu_enable == false) &&
146 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
147 reg_arch_info->value_64 = 0;
148 retval = ERROR_OK;
149 } else {
150 retval = aice_read_reg_64(aice, reg_arch_info->num,
151 &(reg_arch_info->value_64));
155 if (retval == ERROR_OK) {
156 reg->valid = true;
157 reg->dirty = false;
160 return retval;
163 static int nds32_update_psw(struct nds32 *nds32)
165 uint32_t value_ir0;
166 struct aice_port_s *aice = target_to_aice(nds32->target);
168 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
170 /* Save data memory endianness */
171 if ((value_ir0 >> 5) & 0x1) {
172 nds32->data_endian = TARGET_BIG_ENDIAN;
173 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
174 } else {
175 nds32->data_endian = TARGET_LITTLE_ENDIAN;
176 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
179 /* Save translation status */
180 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
182 return ERROR_OK;
185 static int nds32_update_mmu_info(struct nds32 *nds32)
187 uint32_t value;
189 /* Update MMU control status */
190 nds32_get_mapped_reg(nds32, MR0, &value);
191 nds32->mmu_config.default_min_page_size = value & 0x1;
192 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
194 return ERROR_OK;
197 static int nds32_update_cache_info(struct nds32 *nds32)
199 uint32_t value;
201 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
202 if (value & 0x1)
203 nds32->memory.icache.enable = true;
204 else
205 nds32->memory.icache.enable = false;
207 if (value & 0x2)
208 nds32->memory.dcache.enable = true;
209 else
210 nds32->memory.dcache.enable = false;
211 } else {
212 nds32->memory.icache.enable = false;
213 nds32->memory.dcache.enable = false;
216 return ERROR_OK;
219 static int nds32_update_lm_info(struct nds32 *nds32)
221 struct nds32_memory *memory = &(nds32->memory);
222 uint32_t value_mr6;
223 uint32_t value_mr7;
225 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
226 if (value_mr6 & 0x1)
227 memory->ilm_enable = true;
228 else
229 memory->ilm_enable = false;
231 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
232 memory->ilm_start = value_mr6 & 0xFFF00000;
233 memory->ilm_end = memory->ilm_start + memory->ilm_size;
234 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
235 memory->ilm_start = value_mr6 & 0xFFFFFC00;
236 memory->ilm_end = memory->ilm_start + memory->ilm_size;
237 } else {
238 memory->ilm_start = -1;
239 memory->ilm_end = -1;
242 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
243 if (value_mr7 & 0x1)
244 memory->dlm_enable = true;
245 else
246 memory->dlm_enable = false;
248 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
249 memory->dlm_start = value_mr7 & 0xFFF00000;
250 memory->dlm_end = memory->dlm_start + memory->dlm_size;
251 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
252 memory->dlm_start = value_mr7 & 0xFFFFFC00;
253 memory->dlm_end = memory->dlm_start + memory->dlm_size;
254 } else {
255 memory->dlm_start = -1;
256 memory->dlm_end = -1;
259 return ERROR_OK;
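/*
 * Worked example (hypothetical values): with ilm_align_ver == 0 and an
 * 8 KB ILM, reading $mr6 == 0x00400003 yields ilm_enable = true,
 * ilm_start = 0x00400000 and ilm_end = 0x00402000; this window is what
 * nds32_select_memory_mode() later uses to pick ILM vs. MEM access.
 */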
263 * If the FPU/audio extension is disabled, accessing FPU/audio registers
264 * causes exceptions, so we check whether FPU/audio is enabled whenever
265 * the target halts. If the extension is disabled and a user accesses one
266 * of its registers, OpenOCD returns a fake value of 0 instead of
267 * accessing the register through DIM.
269 static int nds32_check_extension(struct nds32 *nds32)
271 uint32_t value;
273 nds32_get_mapped_reg(nds32, FUCPR, &value);
274 if (value == NDS32_REGISTER_DISABLE) {
275 nds32->fpu_enable = false;
276 nds32->audio_enable = false;
277 return ERROR_OK;
280 if (value & 0x1)
281 nds32->fpu_enable = true;
282 else
283 nds32->fpu_enable = false;
285 if (value & 0x80000000)
286 nds32->audio_enable = true;
287 else
288 nds32->audio_enable = false;
290 return ERROR_OK;
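/*
 * As decoded above, bit 0 of $fucpr gates the FPU/coprocessor and bit 31
 * gates the audio extension: a hypothetical value of 0x80000001 reports
 * both fpu_enable and audio_enable as true, while the sentinel
 * NDS32_REGISTER_DISABLE (register unreadable) leaves both disabled.
 */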
293 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
295 struct nds32_reg *reg_arch_info = reg->arch_info;
296 struct target *target = reg_arch_info->target;
297 struct nds32 *nds32 = target_to_nds32(target);
298 struct aice_port_s *aice = target_to_aice(target);
299 uint32_t value = buf_get_u32(buf, 0, 32);
301 if (target->state != TARGET_HALTED) {
302 LOG_ERROR("Target not halted");
303 return ERROR_TARGET_NOT_HALTED;
306 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
308 /* ignore values that will generate exception */
309 if (nds32_reg_exception(mapped_regnum, value))
310 return ERROR_OK;
312 LOG_DEBUG("writing register %i(%s) with value 0x%8.8" PRIx32,
313 reg_arch_info->num, reg->name, value);
315 if ((nds32->fpu_enable == false) &&
316 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
318 buf_set_u32(reg->value, 0, 32, 0);
319 } else if ((nds32->audio_enable == false) &&
320 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
322 buf_set_u32(reg->value, 0, 32, 0);
323 } else {
324 buf_set_u32(reg->value, 0, 32, value);
325 aice_write_register(aice, mapped_regnum, reg_arch_info->value);
327 /* After writing the value to the register, read it back from the
328 * target to avoid W1C (write-one-to-clear) inconsistency. */
329 aice_read_register(aice, mapped_regnum, &(reg_arch_info->value));
332 reg->valid = true;
333 reg->dirty = false;
335 /* refresh cached CPU state immediately so the new register value takes effect */
336 if (IR0 == mapped_regnum) {
337 nds32_update_psw(nds32);
338 } else if (MR0 == mapped_regnum) {
339 nds32_update_mmu_info(nds32);
340 } else if ((MR6 == mapped_regnum) || (MR7 == mapped_regnum)) {
341 /* update lm information */
342 nds32_update_lm_info(nds32);
343 } else if (MR8 == mapped_regnum) {
344 nds32_update_cache_info(nds32);
345 } else if (FUCPR == mapped_regnum) {
346 /* update audio/fpu setting */
347 nds32_check_extension(nds32);
350 return ERROR_OK;
353 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
355 struct nds32_reg *reg_arch_info = reg->arch_info;
356 struct target *target = reg_arch_info->target;
357 struct nds32 *nds32 = target_to_nds32(target);
358 uint32_t low_part = buf_get_u32(buf, 0, 32);
359 uint32_t high_part = buf_get_u32(buf, 32, 32);
361 if (target->state != TARGET_HALTED) {
362 LOG_ERROR("Target not halted");
363 return ERROR_TARGET_NOT_HALTED;
366 if ((nds32->fpu_enable == false) &&
367 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
369 buf_set_u32(reg->value, 0, 32, 0);
370 buf_set_u32(reg->value, 32, 32, 0);
372 reg->valid = true;
373 reg->dirty = false;
374 } else {
375 buf_set_u32(reg->value, 0, 32, low_part);
376 buf_set_u32(reg->value, 32, 32, high_part);
378 reg->valid = true;
379 reg->dirty = true;
382 return ERROR_OK;
385 static const struct reg_arch_type nds32_reg_access_type = {
386 .get = nds32_get_core_reg,
387 .set = nds32_set_core_reg,
390 static const struct reg_arch_type nds32_reg_access_type_64 = {
391 .get = nds32_get_core_reg_64,
392 .set = nds32_set_core_reg_64,
395 static struct reg_cache *nds32_build_reg_cache(struct target *target,
396 struct nds32 *nds32)
398 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
399 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
400 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
401 int i;
403 if (!cache || !reg_list || !reg_arch_info) {
404 free(cache);
405 free(reg_list);
406 free(reg_arch_info);
407 return NULL;
410 cache->name = "Andes registers";
411 cache->next = NULL;
412 cache->reg_list = reg_list;
413 cache->num_regs = 0;
415 for (i = 0; i < TOTAL_REG_NUM; i++) {
416 reg_arch_info[i].num = i;
417 reg_arch_info[i].target = target;
418 reg_arch_info[i].nds32 = nds32;
419 reg_arch_info[i].enable = false;
421 reg_list[i].name = nds32_reg_simple_name(i);
422 reg_list[i].number = reg_arch_info[i].num;
423 reg_list[i].size = nds32_reg_size(i);
424 reg_list[i].arch_info = &reg_arch_info[i];
426 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
428 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
429 reg_list[i].value = &(reg_arch_info[i].value_64);
430 reg_list[i].type = &nds32_reg_access_type_64;
432 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
433 reg_list[i].reg_data_type->id = "ieee_double";
434 reg_list[i].group = "float";
435 } else {
436 reg_list[i].value = &(reg_arch_info[i].value);
437 reg_list[i].type = &nds32_reg_access_type;
438 reg_list[i].group = "general";
440 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
441 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
442 reg_list[i].reg_data_type->id = "ieee_single";
443 reg_list[i].group = "float";
444 } else if ((reg_arch_info[i].num == FPCSR) ||
445 (reg_arch_info[i].num == FPCFG)) {
446 reg_list[i].group = "float";
447 } else if ((reg_arch_info[i].num == R28) ||
448 (reg_arch_info[i].num == R29) ||
449 (reg_arch_info[i].num == R31)) {
450 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
451 reg_list[i].reg_data_type->id = "data_ptr";
452 } else if ((reg_arch_info[i].num == R30) ||
453 (reg_arch_info[i].num == PC)) {
454 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
455 reg_list[i].reg_data_type->id = "code_ptr";
456 } else {
457 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
458 reg_list[i].reg_data_type->id = "uint32";
462 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
463 reg_list[i].caller_save = true;
464 else
465 reg_list[i].caller_save = false;
467 reg_list[i].feature = malloc(sizeof(struct reg_feature));
469 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
470 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
471 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
472 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
473 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
474 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
475 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
476 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
478 cache->num_regs++;
481 nds32->core_cache = cache;
483 return cache;
486 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
488 struct reg_cache *cache;
490 cache = nds32_build_reg_cache(target, nds32);
491 if (!cache)
492 return ERROR_FAIL;
494 *register_get_last_cache_p(&target->reg_cache) = cache;
496 return ERROR_OK;
499 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
501 struct reg *r;
503 r = nds32->core_cache->reg_list + regnum;
505 return r;
508 int nds32_full_context(struct nds32 *nds32)
510 uint32_t value, value_ir0;
512 /* save $pc & $psw */
513 nds32_get_mapped_reg(nds32, PC, &value);
514 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
516 nds32_update_psw(nds32);
517 nds32_update_mmu_info(nds32);
518 nds32_update_cache_info(nds32);
519 nds32_update_lm_info(nds32);
521 nds32_check_extension(nds32);
523 return ERROR_OK;
526 /* get register value internally */
527 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
529 struct reg_cache *reg_cache = nds32->core_cache;
530 struct reg *r;
532 if (regnum >= reg_cache->num_regs)
533 return ERROR_FAIL;
535 r = nds32_reg_current(nds32, regnum);
537 if (ERROR_OK != r->type->get(r))
538 return ERROR_FAIL;
540 *value = buf_get_u32(r->value, 0, 32);
542 return ERROR_OK;
545 /** set register internally */
546 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
548 struct reg_cache *reg_cache = nds32->core_cache;
549 struct reg *r;
550 uint8_t set_value[4];
552 if (regnum >= reg_cache->num_regs)
553 return ERROR_FAIL;
555 r = nds32_reg_current(nds32, regnum);
557 buf_set_u32(set_value, 0, 32, value);
559 return r->type->set(r, set_value);
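/*
 * Minimal usage sketch of these helpers (cf. nds32_nextpc() below):
 *
 *	uint32_t pc;
 *	if (nds32_get_mapped_reg(nds32, PC, &pc) == ERROR_OK)
 *		nds32_set_mapped_reg(nds32, PC, pc + 4);
 *
 * Both paths go through the register cache: reads return the cached
 * value while it is still valid, and writes go through the reg type's
 * set() handler (nds32_set_core_reg() above).
 */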
562 /** get general register list */
563 static int nds32_get_general_reg_list(struct nds32 *nds32,
564 struct reg **reg_list[], int *reg_list_size)
566 struct reg *reg_current;
567 int i;
568 int current_idx;
570 /** freed in gdb_server.c */
571 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
572 current_idx = 0;
574 for (i = R0; i < IFC_LP + 1; i++) {
575 reg_current = nds32_reg_current(nds32, i);
576 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
577 (*reg_list)[current_idx] = reg_current;
578 current_idx++;
581 *reg_list_size = current_idx;
583 return ERROR_OK;
586 /** get all register list */
587 static int nds32_get_all_reg_list(struct nds32 *nds32,
588 struct reg **reg_list[], int *reg_list_size)
590 struct reg_cache *reg_cache = nds32->core_cache;
591 struct reg *reg_current;
592 unsigned int i;
594 *reg_list_size = reg_cache->num_regs;
596 /** freed in gdb_server.c */
597 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
599 for (i = 0; i < reg_cache->num_regs; i++) {
600 reg_current = nds32_reg_current(nds32, i);
601 reg_current->exist = ((struct nds32_reg *)
602 reg_current->arch_info)->enable;
603 (*reg_list)[i] = reg_current;
606 return ERROR_OK;
609 /** get all register list */
610 int nds32_get_gdb_reg_list(struct target *target,
611 struct reg **reg_list[], int *reg_list_size,
612 enum target_register_class reg_class)
614 struct nds32 *nds32 = target_to_nds32(target);
616 switch (reg_class) {
617 case REG_CLASS_ALL:
618 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
619 case REG_CLASS_GENERAL:
620 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
621 default:
622 return ERROR_FAIL;
625 return ERROR_FAIL;
628 static int nds32_select_memory_mode(struct target *target, uint32_t address,
629 uint32_t length, uint32_t *end_address)
631 struct nds32 *nds32 = target_to_nds32(target);
632 struct aice_port_s *aice = target_to_aice(target);
633 struct nds32_memory *memory = &(nds32->memory);
634 struct nds32_edm *edm = &(nds32->edm);
635 uint32_t dlm_start, dlm_end;
636 uint32_t ilm_start, ilm_end;
637 uint32_t address_end = address + length;
639 /* init end_address */
640 *end_address = address_end;
642 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
643 return ERROR_OK;
645 if (edm->access_control == false) {
646 LOG_DEBUG("EDM does not support ACC_CTL");
647 return ERROR_OK;
650 if (edm->direct_access_local_memory == false) {
651 LOG_DEBUG("EDM does not support DALM");
652 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
653 return ERROR_OK;
656 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
657 LOG_DEBUG("Memory mode is not AUTO");
658 return ERROR_OK;
661 /* set default mode */
662 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
664 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
665 ilm_start = memory->ilm_start;
666 ilm_end = memory->ilm_end;
668 /* case 1, address < ilm_start */
669 if (address < ilm_start) {
670 if (ilm_start < address_end) {
671 /* update end_address to split non-ILM from ILM */
672 *end_address = ilm_start;
674 /* MEM mode */
675 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
676 } else if ((ilm_start <= address) && (address < ilm_end)) {
677 /* case 2, ilm_start <= address < ilm_end */
678 if (ilm_end < address_end) {
679 /* update end_address to split non-ILM from ILM */
680 *end_address = ilm_end;
682 /* ILM mode */
683 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
684 } else { /* case 3, ilm_end <= address */
685 /* MEM mode */
686 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
689 return ERROR_OK;
690 } else {
691 LOG_DEBUG("ILM is not enabled");
694 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
695 dlm_start = memory->dlm_start;
696 dlm_end = memory->dlm_end;
698 /* case 1, address < dlm_start */
699 if (address < dlm_start) {
700 if (dlm_start < address_end) {
701 /* update end_address to split non-DLM from DLM */
702 *end_address = dlm_start;
704 /* MEM mode */
705 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
706 } else if ((dlm_start <= address) && (address < dlm_end)) {
707 /* case 2, dlm_start <= address < dlm_end */
708 if (dlm_end < address_end) {
709 /* update end_address to split non-DLM from DLM */
710 *end_address = dlm_end;
712 /* DLM mode */
713 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
714 } else { /* case 3, dlm_end <= address */
715 /* MEM mode */
716 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
719 return ERROR_OK;
720 } else {
721 LOG_DEBUG("DLM is not enabled");
724 return ERROR_OK;
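/*
 * Worked example (hypothetical ILM window): assume ILM covers
 * [0x00400000, 0x00402000) and a caller asks for 0x10 bytes starting at
 * 0x003FFFF8.  The access begins below ilm_start, so *end_address is
 * trimmed to 0x00400000 and MEM mode is selected; nds32_read_buffer()/
 * nds32_write_buffer() then re-issue the remainder, which now falls
 * inside the window and is handled in ILM mode.
 */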
727 int nds32_read_buffer(struct target *target, uint32_t address,
728 uint32_t size, uint8_t *buffer)
730 struct nds32 *nds32 = target_to_nds32(target);
731 struct nds32_memory *memory = &(nds32->memory);
733 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
734 (target->state != TARGET_HALTED)) {
735 LOG_WARNING("target was not halted");
736 return ERROR_TARGET_NOT_HALTED;
739 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
740 address,
741 size);
743 int retval = ERROR_OK;
744 struct aice_port_s *aice = target_to_aice(target);
745 uint32_t end_address;
747 if (((address % 2) == 0) && (size == 2)) {
748 nds32_select_memory_mode(target, address, 2, &end_address);
749 return aice_read_mem_unit(aice, address, 2, 1, buffer);
752 /* handle unaligned head bytes */
753 if (address % 4) {
754 uint32_t unaligned = 4 - (address % 4);
756 if (unaligned > size)
757 unaligned = size;
759 nds32_select_memory_mode(target, address, unaligned, &end_address);
760 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
761 if (retval != ERROR_OK)
762 return retval;
764 buffer += unaligned;
765 address += unaligned;
766 size -= unaligned;
769 /* handle aligned words */
770 if (size >= 4) {
771 int aligned = size - (size % 4);
772 int read_len;
774 do {
775 nds32_select_memory_mode(target, address, aligned, &end_address);
777 read_len = end_address - address;
779 if (read_len > 8)
780 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
781 else
782 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
784 if (retval != ERROR_OK)
785 return retval;
787 buffer += read_len;
788 address += read_len;
789 size -= read_len;
790 aligned -= read_len;
792 } while (aligned != 0);
795 /* prevent byte access when possible (avoid AHB access limitations in some cases) */
796 if (size >= 2) {
797 int aligned = size - (size % 2);
798 nds32_select_memory_mode(target, address, aligned, &end_address);
799 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
800 if (retval != ERROR_OK)
801 return retval;
803 buffer += aligned;
804 address += aligned;
805 size -= aligned;
807 /* handle tail reads of less than 4 bytes */
808 if (size > 0) {
809 nds32_select_memory_mode(target, address, size, &end_address);
810 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
811 if (retval != ERROR_OK)
812 return retval;
815 return ERROR_OK;
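/*
 * Worked example of the splitting above (hypothetical access, no
 * ILM/DLM window in range): reading 10 bytes from 0x1003 becomes one
 * unaligned head byte at 0x1003, a single aice_read_mem_unit() call for
 * two 4-byte words at 0x1004, and one tail byte at 0x100C; only aligned
 * runs longer than 8 bytes are handed to aice_read_mem_bulk().
 */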
818 int nds32_read_memory(struct target *target, uint32_t address,
819 uint32_t size, uint32_t count, uint8_t *buffer)
821 struct aice_port_s *aice = target_to_aice(target);
823 return aice_read_mem_unit(aice, address, size, count, buffer);
826 int nds32_read_phys_memory(struct target *target, uint32_t address,
827 uint32_t size, uint32_t count, uint8_t *buffer)
829 struct aice_port_s *aice = target_to_aice(target);
830 struct nds32 *nds32 = target_to_nds32(target);
831 struct nds32_memory *memory = &(nds32->memory);
832 enum nds_memory_access orig_channel;
833 int result;
835 /* switch to BUS access mode to skip MMU */
836 orig_channel = memory->access_channel;
837 memory->access_channel = NDS_MEMORY_ACC_BUS;
838 aice_memory_access(aice, memory->access_channel);
840 /* The input address is a physical address; no address translation is needed. */
841 result = aice_read_mem_unit(aice, address, size, count, buffer);
843 /* restore the original access mode */
844 memory->access_channel = orig_channel;
845 aice_memory_access(aice, memory->access_channel);
847 return result;
850 int nds32_write_buffer(struct target *target, uint32_t address,
851 uint32_t size, const uint8_t *buffer)
853 struct nds32 *nds32 = target_to_nds32(target);
854 struct nds32_memory *memory = &(nds32->memory);
856 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
857 (target->state != TARGET_HALTED)) {
858 LOG_WARNING("target was not halted");
859 return ERROR_TARGET_NOT_HALTED;
862 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
863 address,
864 size);
866 struct aice_port_s *aice = target_to_aice(target);
867 int retval = ERROR_OK;
868 uint32_t end_address;
870 if (((address % 2) == 0) && (size == 2)) {
871 nds32_select_memory_mode(target, address, 2, &end_address);
872 return aice_write_mem_unit(aice, address, 2, 1, buffer);
875 /* handle unaligned head bytes */
876 if (address % 4) {
877 uint32_t unaligned = 4 - (address % 4);
879 if (unaligned > size)
880 unaligned = size;
882 nds32_select_memory_mode(target, address, unaligned, &end_address);
883 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
884 if (retval != ERROR_OK)
885 return retval;
887 buffer += unaligned;
888 address += unaligned;
889 size -= unaligned;
892 /* handle aligned words */
893 if (size >= 4) {
894 int aligned = size - (size % 4);
895 int write_len;
897 do {
898 nds32_select_memory_mode(target, address, aligned, &end_address);
900 write_len = end_address - address;
901 if (write_len > 8)
902 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
903 else
904 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
905 if (retval != ERROR_OK)
906 return retval;
908 buffer += write_len;
909 address += write_len;
910 size -= write_len;
911 aligned -= write_len;
913 } while (aligned != 0);
916 /* handle tail writes of less than 4 bytes */
917 if (size > 0) {
918 nds32_select_memory_mode(target, address, size, &end_address);
919 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
920 if (retval != ERROR_OK)
921 return retval;
924 return retval;
927 int nds32_write_memory(struct target *target, uint32_t address,
928 uint32_t size, uint32_t count, const uint8_t *buffer)
930 struct aice_port_s *aice = target_to_aice(target);
932 return aice_write_mem_unit(aice, address, size, count, buffer);
935 int nds32_write_phys_memory(struct target *target, uint32_t address,
936 uint32_t size, uint32_t count, const uint8_t *buffer)
938 struct aice_port_s *aice = target_to_aice(target);
939 struct nds32 *nds32 = target_to_nds32(target);
940 struct nds32_memory *memory = &(nds32->memory);
941 enum nds_memory_access orig_channel;
942 int result;
944 /* switch to BUS access mode to skip MMU */
945 orig_channel = memory->access_channel;
946 memory->access_channel = NDS_MEMORY_ACC_BUS;
947 aice_memory_access(aice, memory->access_channel);
949 /* The input address is a physical address; no address translation is needed. */
950 result = aice_write_mem_unit(aice, address, size, count, buffer);
952 /* restore the original access mode */
953 memory->access_channel = orig_channel;
954 aice_memory_access(aice, memory->access_channel);
956 return result;
959 int nds32_mmu(struct target *target, int *enabled)
961 if (target->state != TARGET_HALTED) {
962 LOG_ERROR("%s: target not halted", __func__);
963 return ERROR_TARGET_INVALID;
966 struct nds32 *nds32 = target_to_nds32(target);
967 struct nds32_memory *memory = &(nds32->memory);
968 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
970 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
971 *enabled = 1;
972 else
973 *enabled = 0;
975 return ERROR_OK;
978 int nds32_arch_state(struct target *target)
980 struct nds32 *nds32 = target_to_nds32(target);
982 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
983 LOG_ERROR("BUG: called for a non-Andes target");
984 return ERROR_FAIL;
987 uint32_t value_pc, value_psw;
989 nds32_get_mapped_reg(nds32, PC, &value_pc);
990 nds32_get_mapped_reg(nds32, IR0, &value_psw);
992 LOG_USER("target halted due to %s\n"
993 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
994 debug_reason_name(target),
995 value_psw,
996 value_pc,
997 nds32->virtual_hosting ? ", virtual hosting" : "");
999 /* save pc value to pseudo register pc */
1000 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1001 buf_set_u32(reg->value, 0, 32, value_pc);
1003 return ERROR_OK;
1006 static void nds32_init_must_have_registers(struct nds32 *nds32)
1008 struct reg_cache *reg_cache = nds32->core_cache;
1010 /** MUST have general registers */
1011 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1029 /** MUST have configuration system registers */
1030 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1032 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1033 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1034 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1036 /** MUST have interrupt system registers */
1037 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1039 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1043 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1044 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1045 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1047 /** MUST have MMU system registers */
1048 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1050 /** MUST have EDM system registers */
1051 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1052 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1055 static int nds32_init_memory_config(struct nds32 *nds32)
1057 uint32_t value_cr1; /* ICM_CFG */
1058 uint32_t value_cr2; /* DCM_CFG */
1059 struct nds32_memory *memory = &(nds32->memory);
1061 /* read $cr1 to init instruction memory information */
1062 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1063 memory->icache.set = value_cr1 & 0x7;
1064 memory->icache.way = (value_cr1 >> 3) & 0x7;
1065 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1066 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1068 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1069 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1071 /* read $cr2 to init data memory information */
1072 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1073 memory->dcache.set = value_cr2 & 0x7;
1074 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1075 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1076 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1078 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1079 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1081 return ERROR_OK;
1084 static void nds32_init_config(struct nds32 *nds32)
1086 uint32_t value_cr0;
1087 uint32_t value_cr3;
1088 uint32_t value_cr4;
1089 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1090 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1091 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1093 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1094 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1095 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1097 /* config cpu version */
1098 cpu_version->performance_extension = value_cr0 & 0x1;
1099 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1100 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1101 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1102 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1103 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1104 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1105 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1107 /* config MMU */
1108 mmu_config->memory_protection = value_cr3 & 0x3;
1109 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1110 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1111 if (mmu_config->fully_associative_tlb) {
1112 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1113 } else {
1114 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1115 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1117 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1118 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1119 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1120 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1121 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1122 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1123 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1124 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1125 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1126 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1128 /* config misc */
1129 misc_config->edm = value_cr4 & 0x1;
1130 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1131 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1132 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1133 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1134 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1135 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1136 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1137 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1138 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1139 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1140 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1141 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1142 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1143 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1144 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1145 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1146 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1147 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1148 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1150 nds32_init_memory_config(nds32);
1153 static int nds32_init_option_registers(struct nds32 *nds32)
1155 struct reg_cache *reg_cache = nds32->core_cache;
1156 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1157 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1158 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1159 struct nds32_memory *memory_config = &(nds32->memory);
1161 bool no_cr5;
1162 bool mr10_exist;
1163 bool no_racr0;
1165 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1166 ((cpu_version->revision & 0xFC) == 0)) {
1167 no_cr5 = true;
1168 mr10_exist = true;
1169 no_racr0 = true;
1170 } else {
1171 no_cr5 = false;
1172 mr10_exist = false;
1173 no_racr0 = false;
1176 if (misc_config->reduce_register == false) {
1177 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1179 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1190 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1191 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1192 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1195 if (misc_config->no_dx_register == false) {
1196 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1197 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1198 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1199 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1202 if (misc_config->ex9)
1203 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1205 if (no_cr5 == false)
1206 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1208 if (cpu_version->cop_fpu_extension) {
1209 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1210 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1211 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1214 if (mmu_config->memory_protection == 1) {
1215 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1216 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1217 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1220 if (nds32->privilege_level != 0)
1221 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1223 if (misc_config->mcu == true)
1224 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1226 if (misc_config->interruption_level == false) {
1227 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1228 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1229 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1230 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1231 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1233 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1234 if (mmu_config->memory_protection != 1)
1235 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1238 if ((cpu_version->cpu_id_family == 0x9) ||
1239 (cpu_version->cpu_id_family == 0xA) ||
1240 (cpu_version->cpu_id_family == 0xC) ||
1241 (cpu_version->cpu_id_family == 0xD))
1242 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1244 if (misc_config->shadow == 1) {
1245 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1246 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1249 if (misc_config->ifc)
1250 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1252 if (nds32->privilege_level != 0)
1253 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1255 if (mmu_config->memory_protection == 1) {
1256 if (mmu_config->memory_protection_version == 24)
1257 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1259 if (nds32->privilege_level == 0) {
1260 if ((mmu_config->memory_protection_version == 16) ||
1261 (mmu_config->memory_protection_version == 24)) {
1262 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1263 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1264 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1269 if (misc_config->shadow == 1) {
1270 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1271 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1272 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1276 } else if (mmu_config->memory_protection == 2) {
1277 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1278 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1280 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1281 (cpu_version->cpu_id_family != 0xD))
1282 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1285 if (mmu_config->memory_protection > 0) {
1286 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1287 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1290 if (memory_config->ilm_base != 0)
1291 if (nds32->privilege_level == 0)
1292 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1294 if (memory_config->dlm_base != 0)
1295 if (nds32->privilege_level == 0)
1296 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1298 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1299 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1301 if (misc_config->high_speed_memory_port)
1302 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1304 if (mr10_exist)
1305 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1307 if (misc_config->edm) {
1308 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1310 for (int i = 0 ; i < dr_reg_n ; i++)
1311 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1313 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1314 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1315 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1316 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1319 if (misc_config->debug_tracer) {
1320 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1321 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1324 if (misc_config->performance_monitor) {
1325 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1326 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1327 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1331 if (misc_config->local_memory_dma) {
1332 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1333 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1340 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1342 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1345 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1346 (no_racr0 == false))
1347 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1349 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1350 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1352 if (misc_config->audio_isa != 0) {
1353 if (misc_config->audio_isa > 1) {
1354 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1355 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1358 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1359 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1376 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1377 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1378 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1380 uint32_t value_mod;
1381 uint32_t fucpr_backup;
1382 /* temporarily enable the audio unit (FUCPR bit 31) to read the $mod configuration */
1383 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1384 if ((fucpr_backup & 0x80000000) == 0)
1385 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1386 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1387 /* restore the original FUCPR value */
1388 if ((fucpr_backup & 0x80000000) == 0)
1389 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1391 if ((value_mod >> 6) & 0x1) {
1392 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1393 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1394 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1398 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1399 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1400 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1404 if ((cpu_version->cpu_id_family == 0x9) ||
1405 (cpu_version->cpu_id_family == 0xA) ||
1406 (cpu_version->cpu_id_family == 0xC)) {
1408 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1409 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1411 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1412 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1415 uint32_t ir3_value;
1416 uint32_t ivb_prog_pri_lvl;
1417 uint32_t ivb_ivic_ver;
1419 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1420 ivb_prog_pri_lvl = ir3_value & 0x1;
1421 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1423 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1424 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1425 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1428 if (ivb_ivic_ver >= 1) {
1429 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1430 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1431 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1432 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1435 return ERROR_OK;
1438 int nds32_init_register_table(struct nds32 *nds32)
1440 nds32_init_must_have_registers(nds32);
1442 return ERROR_OK;
1445 int nds32_add_software_breakpoint(struct target *target,
1446 struct breakpoint *breakpoint)
1448 uint32_t data;
1449 uint32_t check_data;
1450 uint32_t break_insn;
1452 /* check the breakpoint size */
1453 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1455 /* back up the original instruction;
1456 * nds32 instructions are big-endian */
1457 if (*(char *)&data & 0x80) { /* 16-bit instruction */
1458 breakpoint->length = 2;
1459 break_insn = NDS32_BREAK_16;
1460 } else { /* 32-bit instruction */
1461 breakpoint->length = 4;
1462 break_insn = NDS32_BREAK_32;
1465 if (breakpoint->orig_instr != NULL)
1466 free(breakpoint->orig_instr);
1468 breakpoint->orig_instr = malloc(breakpoint->length);
1469 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1471 /* self-modified code */
1472 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1473 /* write_back & invalidate dcache & invalidate icache */
1474 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1476 /* read back to check */
1477 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1478 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1479 return ERROR_OK;
1481 return ERROR_FAIL;
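/*
 * Illustrative note: nds32 instructions are stored big-endian and the
 * most significant bit of the first fetched byte (instruction bit 31)
 * selects the encoding, so an original instruction starting with e.g.
 * 0xEA is 16-bit and is patched with the 2-byte NDS32_BREAK_16, while
 * one starting with e.g. 0x64 is 32-bit and gets the 4-byte
 * NDS32_BREAK_32.
 */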
1484 int nds32_remove_software_breakpoint(struct target *target,
1485 struct breakpoint *breakpoint)
1487 uint32_t check_data;
1488 uint32_t break_insn;
1490 if (breakpoint->length == 2)
1491 break_insn = NDS32_BREAK_16;
1492 else if (breakpoint->length == 4)
1493 break_insn = NDS32_BREAK_32;
1494 else
1495 return ERROR_FAIL;
1497 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1498 (uint8_t *)&check_data);
1500 /* break instruction is modified */
1501 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1502 return ERROR_FAIL;
1504 /* self-modified code */
1505 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1506 breakpoint->orig_instr);
1508 /* write_back & invalidate dcache & invalidate icache */
1509 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1511 return ERROR_OK;
1515 * Restore the processor context on an Andes target. The register
1516 * cache is scanned for entries that are marked dirty and hold a valid
1517 * new value; each such register is written back to the processor
1518 * through the AICE interface (64-bit FPU registers via
1519 * aice_write_reg_64(), all others via aice_write_register()). Dirty
1520 * registers whose cached value is not valid are left untouched.
1522 * @param target Pointer to the Andes target to have its context restored
1523 * @return Error status if the target is not halted.
1525 int nds32_restore_context(struct target *target)
1527 struct nds32 *nds32 = target_to_nds32(target);
1528 struct aice_port_s *aice = target_to_aice(target);
1529 struct reg_cache *reg_cache = nds32->core_cache;
1530 struct reg *reg;
1531 struct nds32_reg *reg_arch_info;
1532 unsigned int i;
1534 LOG_DEBUG("-");
1536 if (target->state != TARGET_HALTED) {
1537 LOG_WARNING("target not halted");
1538 return ERROR_TARGET_NOT_HALTED;
1541 /* check if there are dirty registers */
1542 for (i = 0; i < reg_cache->num_regs; i++) {
1543 reg = &(reg_cache->reg_list[i]);
1544 if (reg->dirty == true) {
1545 if (reg->valid == true) {
1547 LOG_DEBUG("examining dirty reg: %s", reg->name);
1548 LOG_DEBUG("writing register %i "
1549 "with value 0x%8.8" PRIx32, i, buf_get_u32(reg->value, 0, 32));
1551 reg_arch_info = reg->arch_info;
1552 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31)
1553 aice_write_reg_64(aice, reg_arch_info->num, reg_arch_info->value_64);
1554 else
1555 aice_write_register(aice, reg_arch_info->num, reg_arch_info->value);
1556 reg->valid = true;
1557 reg->dirty = false;
1562 return ERROR_OK;
1565 int nds32_edm_config(struct nds32 *nds32)
1567 struct target *target = nds32->target;
1568 struct aice_port_s *aice = target_to_aice(target);
1569 uint32_t edm_cfg;
1570 uint32_t edm_ctl;
1572 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1574 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1575 LOG_INFO("EDM version 0x%04" PRIx32, nds32->edm.version);
1577 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1579 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1580 nds32->edm.access_control = true;
1581 else
1582 nds32->edm.access_control = false;
1584 if ((edm_cfg >> 4) & 0x1)
1585 nds32->edm.direct_access_local_memory = true;
1586 else
1587 nds32->edm.direct_access_local_memory = false;
1589 if (nds32->edm.version <= 0x20)
1590 nds32->edm.direct_access_local_memory = false;
1592 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1593 if (edm_ctl & (0x1 << 29))
1594 nds32->edm.support_max_stop = true;
1595 else
1596 nds32->edm.support_max_stop = false;
1598 /* set passcode for secure MCU */
1599 nds32_login(nds32);
1601 return ERROR_OK;
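/*
 * Worked example (hypothetical EDM_CFG value): edm_cfg == 0x00600012
 * decodes to EDM version 0x0060, (0x2 + 1) = 3 hardware breakpoints,
 * ACC_CTL support (version >= 0x60) and DALM support (bit 4 set), so
 * nds32_select_memory_mode() may use direct local-memory access.
 */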
1604 int nds32_config(struct nds32 *nds32)
1606 nds32_init_config(nds32);
1608 /* init optional system registers according to config registers */
1609 nds32_init_option_registers(nds32);
1611 /* get max interrupt level */
1612 if (nds32->misc_config.interruption_level)
1613 nds32->max_interrupt_level = 2;
1614 else
1615 nds32->max_interrupt_level = 3;
1617 /* get ILM/DLM size from MR6/MR7 */
1618 uint32_t value_mr6, value_mr7;
1619 uint32_t size_index;
1620 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1621 size_index = (value_mr6 >> 1) & 0xF;
1622 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1624 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1625 size_index = (value_mr7 >> 1) & 0xF;
1626 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
1628 return ERROR_OK;
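/*
 * Worked example (hypothetical register value): if $mr6 reads back as
 * 0x00400003, bits [4:1] give size_index == 1 and
 * NDS32_LM_SIZE_TABLE[1] reports an 8 KB ILM; nds32_update_lm_info()
 * combines this size with the base-address bits of the same register
 * to form the ILM window.
 */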
1631 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1633 target->arch_info = nds32;
1634 nds32->target = target;
1636 nds32->common_magic = NDS32_COMMON_MAGIC;
1637 nds32->init_arch_info_after_halted = false;
1638 nds32->auto_convert_hw_bp = true;
1639 nds32->global_stop = false;
1640 nds32->soft_reset_halt = false;
1641 nds32->edm_passcode = NULL;
1642 nds32->privilege_level = 0;
1643 nds32->boot_time = 1500;
1644 nds32->reset_halt_as_examine = false;
1645 nds32->keep_target_edm_ctl = false;
1646 nds32->word_access_mem = false;
1647 nds32->virtual_hosting = true;
1648 nds32->hit_syscall = false;
1649 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1650 nds32->virtual_hosting_errno = 0;
1651 nds32->virtual_hosting_ctrl_c = false;
1652 nds32->attached = false;
1654 nds32->syscall_break.asid = 0;
1655 nds32->syscall_break.length = 4;
1656 nds32->syscall_break.set = 0;
1657 nds32->syscall_break.orig_instr = NULL;
1658 nds32->syscall_break.next = NULL;
1659 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1660 nds32->syscall_break.linked_BRP = 0;
1662 nds32_reg_init();
1664 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1665 return ERROR_FAIL;
1667 if (ERROR_OK != nds32_init_register_table(nds32))
1668 return ERROR_FAIL;
1670 return ERROR_OK;
1673 int nds32_virtual_to_physical(struct target *target, uint32_t address, uint32_t *physical)
1675 struct nds32 *nds32 = target_to_nds32(target);
1677 if (nds32->memory.address_translation == false) {
1678 *physical = address;
1679 return ERROR_OK;
1682 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1683 return ERROR_OK;
1685 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1686 return ERROR_OK;
1688 return ERROR_FAIL;
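/* Make the caches coherent after the debugger touches memory: write back and
 * invalidate every D-cache line covering [address, address + length), then
 * invalidate the matching I-cache lines (using physical addresses, see the
 * comment below) so newly written code is refetched from memory. */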
1691 int nds32_cache_sync(struct target *target, uint32_t address, uint32_t length)
1693 struct aice_port_s *aice = target_to_aice(target);
1694 struct nds32 *nds32 = target_to_nds32(target);
1695 struct nds32_cache *dcache = &(nds32->memory.dcache);
1696 struct nds32_cache *icache = &(nds32->memory.icache);
1697 uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
1698 uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
1699 uint32_t cur_address;
1700 int result;
1701 uint32_t start_line, end_line;
1702 uint32_t cur_line;
1704 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1705 /* address / dcache_line_size */
1706 start_line = address >> (dcache->line_size + 2);
1707 /* (address + length - 1) / dcache_line_size */
1708 end_line = (address + length - 1) >> (dcache->line_size + 2);
1710 for (cur_address = address, cur_line = start_line ;
1711 cur_line <= end_line ;
1712 cur_address += dcache_line_size, cur_line++) {
1713 /* D$ write back */
1714 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1715 if (result != ERROR_OK)
1716 return result;
1718 /* D$ invalidate */
1719 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1720 if (result != ERROR_OK)
1721 return result;
1725 if ((icache->line_size != 0) && (icache->enable == true)) {
1726 /* address / icache_line_size */
1727 start_line = address >> (icache->line_size + 2);
1728 /* (address + length - 1) / icache_line_size */
1729 end_line = (address + length - 1) >> (icache->line_size + 2);
1731 for (cur_address = address, cur_line = start_line ;
1732 cur_line <= end_line ;
1733 cur_address += icache_line_size, cur_line++) {
1734 /* Because PSW.IT is turned off under a debug exception, the address
1735 * MUST be a physical address: L1I_VA_INVALIDATE uses PSW.IT to decide
1736 * whether to perform address translation. */
1737 uint32_t physical_addr;
1738 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1739 &physical_addr))
1740 return ERROR_FAIL;
1742 /* I$ invalidate */
1743 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1744 if (result != ERROR_OK)
1745 return result;
1749 return ERROR_OK;
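/* Resolve the PC for a step/resume request: when 'current' is zero the
 * supplied address is written to PC, otherwise the current PC is read back
 * and returned. */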
1752 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1754 if (!current)
1755 nds32_set_mapped_reg(nds32, PC, address);
1756 else
1757 nds32_get_mapped_reg(nds32, PC, &address);
1759 return address;
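/* Single-step one instruction.  The DSSIM bit (IR14 bit 31) is set or
 * cleared according to step_isr_enable (presumably controlling whether
 * interrupts are serviced during the step), and the step itself is skipped
 * right after a virtual-hosting syscall hit, since leave_debug_state()
 * already steps over the syscall instruction. */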
1762 int nds32_step(struct target *target, int current,
1763 uint32_t address, int handle_breakpoints)
1765 LOG_DEBUG("target->state: %s",
1766 target_state_name(target));
1768 if (target->state != TARGET_HALTED) {
1769 LOG_WARNING("target was not halted");
1770 return ERROR_TARGET_NOT_HALTED;
1773 struct nds32 *nds32 = target_to_nds32(target);
1775 address = nds32_nextpc(nds32, current, address);
1777 LOG_DEBUG("STEP PC %08" PRIx32 "%s", address, !current ? "!" : "");
1779 /** set DSSIM */
1780 uint32_t ir14_value;
1781 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1782 if (nds32->step_isr_enable)
1783 ir14_value |= (0x1 << 31);
1784 else
1785 ir14_value &= ~(0x1 << 31);
1786 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1788 /* check hit_syscall before leave_debug_state() because
1789 * leave_debug_state() may clear hit_syscall flag */
1790 bool no_step = false;
1791 if (nds32->hit_syscall)
1792 /* step after hit_syscall should be ignored because
1793 * leave_debug_state will step implicitly to skip the
1794 * syscall */
1795 no_step = true;
1797 /********* TODO: maybe create another function to handle this part */
1798 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1799 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1801 if (no_step == false) {
1802 struct aice_port_s *aice = target_to_aice(target);
1803 if (ERROR_OK != aice_step(aice))
1804 return ERROR_FAIL;
1807 /* save state */
1808 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1809 /********* TODO: maybe create another function to handle this part */
1811 /* restore DSSIM */
1812 if (nds32->step_isr_enable) {
1813 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1814 ir14_value &= ~(0x1 << 31);
1815 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1818 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1820 return ERROR_OK;
1823 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1825 struct target *target = nds32->target;
1827 if (target->state != TARGET_HALTED) {
1828 LOG_WARNING("target was not halted");
1829 return ERROR_TARGET_NOT_HALTED;
1832 /** set DSSIM */
1833 uint32_t ir14_value;
1834 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1835 if (nds32->step_isr_enable)
1836 ir14_value |= (0x1 << 31);
1837 else
1838 ir14_value &= ~(0x1 << 31);
1839 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1841 /********* TODO: maybe create another function to handle this part */
1842 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1844 struct aice_port_s *aice = target_to_aice(target);
1846 if (ERROR_OK != aice_step(aice))
1847 return ERROR_FAIL;
1849 /* save state */
1850 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1851 /********* TODO: maybe create another function to handle this part */
1853 /* restore DSSIM */
1854 if (nds32->step_isr_enable) {
1855 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1856 ir14_value &= ~(0x1 << 31);
1857 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1860 return ERROR_OK;
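/* Ask the adapter for the core state and map the AICE-specific value onto
 * the generic target_state enum; a disconnected USB adapter or a detached
 * target is reported as a failure. */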
1863 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1865 struct aice_port_s *aice = target_to_aice(nds32->target);
1866 enum aice_target_state_s nds32_state;
1868 if (aice_state(aice, &nds32_state) != ERROR_OK)
1869 return ERROR_FAIL;
1871 switch (nds32_state) {
1872 case AICE_DISCONNECT:
1873 LOG_INFO("USB is disconnected");
1874 return ERROR_FAIL;
1875 case AICE_TARGET_DETACH:
1876 LOG_INFO("Target is disconnected");
1877 return ERROR_FAIL;
1878 case AICE_TARGET_UNKNOWN:
1879 *state = TARGET_UNKNOWN;
1880 break;
1881 case AICE_TARGET_RUNNING:
1882 *state = TARGET_RUNNING;
1883 break;
1884 case AICE_TARGET_HALTED:
1885 *state = TARGET_HALTED;
1886 break;
1887 case AICE_TARGET_RESET:
1888 *state = TARGET_RESET;
1889 break;
1890 case AICE_TARGET_DEBUG_RUNNING:
1891 *state = TARGET_DEBUG_RUNNING;
1892 break;
1893 default:
1894 return ERROR_FAIL;
1897 return ERROR_OK;
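/* Translate the EDM-reported stop reason into an OpenOCD debug_reason.  A
 * pending virtual-hosting syscall counts as a breakpoint, 'break 0x7FFF'
 * maps to DBG_REASON_EXIT, and the watchpoint cases also record the watched
 * address (the precise variants step once, without watchpoints, to get past
 * the watched instruction). */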
1900 int nds32_examine_debug_reason(struct nds32 *nds32)
1902 uint32_t reason;
1903 struct target *target = nds32->target;
1905 if (nds32->hit_syscall == true) {
1906 LOG_DEBUG("Hit syscall breakpoint");
1907 target->debug_reason = DBG_REASON_BREAKPOINT;
1908 return ERROR_OK;
1911 nds32->get_debug_reason(nds32, &reason);
1913 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1915 /* Examine debug reason */
1916 switch (reason) {
1917 case NDS32_DEBUG_BREAK:
1918 case NDS32_DEBUG_BREAK_16:
1919 case NDS32_DEBUG_INST_BREAK:
1921 uint32_t value_pc;
1922 uint32_t opcode;
1923 struct nds32_instruction instruction;
1925 nds32_get_mapped_reg(nds32, PC, &value_pc);
1927 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1928 return ERROR_FAIL;
1929 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1930 &instruction))
1931 return ERROR_FAIL;
1933 /* hit 'break 0x7FFF' */
1934 if ((instruction.info.opc_6 == 0x32) &&
1935 (instruction.info.sub_opc == 0xA) &&
1936 (instruction.info.imm == 0x7FFF)) {
1937 target->debug_reason = DBG_REASON_EXIT;
1938 } else
1939 target->debug_reason = DBG_REASON_BREAKPOINT;
1941 break;
1942 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1943 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1944 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is a precise exception */
1946 int result;
1948 result = nds32->get_watched_address(nds32,
1949 &(nds32->watched_address), reason);
1950 /* do a single step (without watchpoints) to skip the "watched" instruction */
1951 nds32_step_without_watchpoint(nds32);
1953 /* the exception address was already fetched above, before the single step */
1954 if (ERROR_OK != result)
1955 return ERROR_FAIL;
1957 target->debug_reason = DBG_REASON_WATCHPOINT;
1959 break;
1960 case NDS32_DEBUG_DEBUG_INTERRUPT:
1961 target->debug_reason = DBG_REASON_DBGRQ;
1962 break;
1963 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1964 target->debug_reason = DBG_REASON_SINGLESTEP;
1965 break;
1966 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1967 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1968 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1969 if (ERROR_OK != nds32->get_watched_address(nds32,
1970 &(nds32->watched_address), reason))
1971 return ERROR_FAIL;
1973 target->debug_reason = DBG_REASON_WATCHPOINT;
1974 break;
1975 default:
1976 target->debug_reason = DBG_REASON_UNDEFINED;
1977 break;
1980 return ERROR_OK;
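/* Unlock a secure MCU.  The configured EDM passcode is split into chunks of
 * up to eight hex digits, each turned into a "write_misc gen_port0 0x..;"
 * command and sent through aice_program_edm(); any extra operations queued
 * in nds32_edm_ops are replayed to gen_port0/gen_port1 the same way. */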
1983 int nds32_login(struct nds32 *nds32)
1985 struct target *target = nds32->target;
1986 struct aice_port_s *aice = target_to_aice(target);
1987 uint32_t passcode_length;
1988 char command_sequence[129];
1989 char command_str[33];
1990 char code_str[9];
1991 uint32_t copy_length;
1992 uint32_t code;
1993 uint32_t i;
1995 LOG_DEBUG("nds32_login");
1997 if (nds32->edm_passcode != NULL) {
1998 /* convert EDM passcode to command sequences */
1999 passcode_length = strlen(nds32->edm_passcode);
2000 command_sequence[0] = '\0';
2001 for (i = 0; i < passcode_length; i += 8) {
2002 if (passcode_length - i < 8)
2003 copy_length = passcode_length - i;
2004 else
2005 copy_length = 8;
2007 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2008 code_str[copy_length] = '\0';
2009 code = strtoul(code_str, NULL, 16);
2011 sprintf(command_str, "write_misc gen_port0 0x%x;", code);
2012 strcat(command_sequence, command_str);
2015 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2016 return ERROR_FAIL;
2018 /* get current privilege level */
2019 uint32_t value_edmsw;
2020 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2021 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2022 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2025 if (nds32_edm_ops_num > 0) {
2026 const char *reg_name;
2027 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2028 code = nds32_edm_ops[i].value;
2029 if (nds32_edm_ops[i].reg_no == 6)
2030 reg_name = "gen_port0";
2031 else if (nds32_edm_ops[i].reg_no == 7)
2032 reg_name = "gen_port1";
2033 else
2034 return ERROR_FAIL;
2036 sprintf(command_str, "write_misc %s 0x%x;", reg_name, code);
2037 if (ERROR_OK != aice_program_edm(aice, command_str))
2038 return ERROR_FAIL;
2042 return ERROR_OK;
2045 int nds32_halt(struct target *target)
2047 struct nds32 *nds32 = target_to_nds32(target);
2048 struct aice_port_s *aice = target_to_aice(target);
2049 enum target_state state;
2051 LOG_DEBUG("target->state: %s",
2052 target_state_name(target));
2054 if (target->state == TARGET_HALTED) {
2055 LOG_DEBUG("target was already halted");
2056 return ERROR_OK;
2059 if (nds32_target_state(nds32, &state) != ERROR_OK)
2060 return ERROR_FAIL;
2062 if (TARGET_HALTED != state)
2063 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2064 if (ERROR_OK != aice_halt(aice))
2065 return ERROR_FAIL;
2067 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2069 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2071 return ERROR_OK;
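/* Periodic poll: reconcile the adapter-reported state with target->state,
 * entering debug state on a fresh halt (or letting the core free-run again
 * on a false hit) and flagging resets that happen behind the debugger's
 * back. */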
2074 /* poll current target status */
2075 int nds32_poll(struct target *target)
2077 struct nds32 *nds32 = target_to_nds32(target);
2078 enum target_state state;
2080 if (nds32_target_state(nds32, &state) != ERROR_OK)
2081 return ERROR_FAIL;
2083 if (state == TARGET_HALTED) {
2084 if (target->state != TARGET_HALTED) {
2085 /* if false_hit, continue free_run */
2086 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2087 struct aice_port_s *aice = target_to_aice(target);
2088 aice_run(aice);
2089 return ERROR_OK;
2092 LOG_DEBUG("Change target state to TARGET_HALTED.");
2094 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2096 } else if (state == TARGET_RESET) {
2097 if (target->state == TARGET_HALTED) {
2098 /* similar to assert srst */
2099 register_cache_invalidate(nds32->core_cache);
2100 target->state = TARGET_RESET;
2102 /* TODO: deassert srst */
2103 } else if (target->state == TARGET_RUNNING) {
2104 /* reset as running */
2105 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2107 } else {
2108 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2109 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2110 target->state = TARGET_RUNNING;
2111 target->debug_reason = DBG_REASON_NOTHALTED;
2115 return ERROR_OK;
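/* Resume from the requested (or current) PC.  Unless the core is already at
 * its maximum interrupt level, the HSS bit (IR0 bit 11) is cleared first so
 * hardware single-step cannot be misused, and the actual run request is
 * skipped when the previous halt came from a virtual-hosting ctrl-c. */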
2118 int nds32_resume(struct target *target, int current,
2119 uint32_t address, int handle_breakpoints, int debug_execution)
2121 LOG_DEBUG("current %d address %08x handle_breakpoints %d debug_execution %d",
2122 current, address, handle_breakpoints, debug_execution);
2124 struct nds32 *nds32 = target_to_nds32(target);
2126 if (target->state != TARGET_HALTED) {
2127 LOG_ERROR("Target not halted");
2128 return ERROR_TARGET_NOT_HALTED;
2131 address = nds32_nextpc(nds32, current, address);
2133 LOG_DEBUG("RESUME PC %08" PRIx32 "%s", address, !current ? "!" : "");
2135 if (!debug_execution)
2136 target_free_all_working_areas(target);
2138 /* Disable HSS to prevent users from misusing HSS */
2139 if (nds32_reach_max_interrupt_level(nds32) == false) {
2140 uint32_t value_ir0;
2141 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2142 value_ir0 &= ~(0x1 << 11);
2143 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2146 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2147 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2149 if (nds32->virtual_hosting_ctrl_c == false) {
2150 struct aice_port_s *aice = target_to_aice(target);
2151 aice_run(aice);
2152 } else
2153 nds32->virtual_hosting_ctrl_c = false;
2155 target->debug_reason = DBG_REASON_NOTHALTED;
2156 if (!debug_execution)
2157 target->state = TARGET_RUNNING;
2158 else
2159 target->state = TARGET_DEBUG_RUNNING;
2161 LOG_DEBUG("target->state: %s",
2162 target_state_name(target));
2164 return ERROR_OK;
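/* Software fallback for reset-and-halt: assert SRST, halt the core, then
 * point the PC at the interrupt vector base read from IR3 so execution
 * restarts from the vector table. */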
2167 static int nds32_soft_reset_halt(struct target *target)
2169 /* TODO: test it */
2170 struct nds32 *nds32 = target_to_nds32(target);
2171 struct aice_port_s *aice = target_to_aice(target);
2173 aice_assert_srst(aice, AICE_SRST);
2175 /* halt core and set pc to 0x0 */
2176 int retval = target_halt(target);
2177 if (retval != ERROR_OK)
2178 return retval;
2180 /* start fetching from IVB */
2181 uint32_t value_ir3;
2182 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2183 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2185 return ERROR_OK;
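/* Assert reset.  When reset_halt is requested, the adapter's reset-and-hold
 * is used unless soft_reset_halt was forced or the EDM/CPU revision is one
 * that needs the software fallback above; otherwise SRST is pulsed and the
 * configured boot time is waited out before re-sending the EDM passcode. */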
2188 int nds32_assert_reset(struct target *target)
2190 struct nds32 *nds32 = target_to_nds32(target);
2191 struct aice_port_s *aice = target_to_aice(target);
2192 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2194 if (target->reset_halt) {
2195 if ((nds32->soft_reset_halt)
2196 || (nds32->edm.version < 0x51)
2197 || ((nds32->edm.version == 0x51)
2198 && (cpu_version->revision == 0x1C)
2199 && (cpu_version->cpu_id_family == 0xC)
2200 && (cpu_version->cpu_id_version == 0x0)))
2201 nds32_soft_reset_halt(target);
2202 else
2203 aice_assert_srst(aice, AICE_RESET_HOLD);
2204 } else {
2205 aice_assert_srst(aice, AICE_SRST);
2206 alive_sleep(nds32->boot_time);
2209 /* set passcode for secure MCU after core reset */
2210 nds32_login(nds32);
2212 /* registers are now invalid */
2213 register_cache_invalidate(nds32->core_cache);
2215 target->state = TARGET_RESET;
2217 return ERROR_OK;
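/* GDB attach/detach hooks.  On attach the target's EDM_CTL is optionally
 * backed up (keep_target_edm_ctl) and the core is halted; on detach the
 * target is resumed with virtual hosting temporarily disabled and the saved
 * EDM_CTL is restored. */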
2220 static int nds32_gdb_attach(struct nds32 *nds32)
2222 LOG_DEBUG("nds32_gdb_attach, target coreid: %d", nds32->target->coreid);
2224 if (nds32->attached == false) {
2226 if (nds32->keep_target_edm_ctl) {
2227 /* backup target EDM_CTL */
2228 struct aice_port_s *aice = target_to_aice(nds32->target);
2229 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2232 target_halt(nds32->target);
2234 nds32->attached = true;
2237 return ERROR_OK;
2240 static int nds32_gdb_detach(struct nds32 *nds32)
2242 LOG_DEBUG("nds32_gdb_detach");
2243 bool backup_virtual_hosting_setting;
2245 if (nds32->attached) {
2247 backup_virtual_hosting_setting = nds32->virtual_hosting;
2248 /* turn off virtual hosting before resuming on gdb detach */
2249 nds32->virtual_hosting = false;
2250 target_resume(nds32->target, 1, 0, 0, 0);
2251 nds32->virtual_hosting = backup_virtual_hosting_setting;
2253 if (nds32->keep_target_edm_ctl) {
2254 /* restore target EDM_CTL */
2255 struct aice_port_s *aice = target_to_aice(nds32->target);
2256 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2259 nds32->attached = false;
2262 return ERROR_OK;
2265 static int nds32_callback_event_handler(struct target *target,
2266 enum target_event event, void *priv)
2268 int retval = ERROR_OK;
2269 int target_number = *(int *)priv;
2271 if (target_number != target->target_number)
2272 return ERROR_OK;
2274 struct nds32 *nds32 = target_to_nds32(target);
2276 switch (event) {
2277 case TARGET_EVENT_GDB_ATTACH:
2278 retval = nds32_gdb_attach(nds32);
2279 break;
2280 case TARGET_EVENT_GDB_DETACH:
2281 retval = nds32_gdb_detach(nds32);
2282 break;
2283 default:
2284 break;
2287 return retval;
2290 int nds32_init(struct nds32 *nds32)
2292 /* Initialize anything we can set up without talking to the target */
2293 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2295 /* register event callback */
2296 target_register_event_callback(nds32_callback_event_handler,
2297 &(nds32->target->target_number));
2299 return ERROR_OK;
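/* GDB file-I/O (virtual hosting) support: the syscall number is taken from
 * IR6 bits [30:16] and mapped to the gdb fileio request name, with its
 * arguments read from R0-R2; for string arguments the path/command is read
 * from target memory so its strlen() + 1 can be reported as well. */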
2302 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2304 /* fill syscall parameters into the file-I/O info */
2305 if (NULL == fileio_info) {
2306 LOG_ERROR("Target has not initial file-I/O data structure");
2307 return ERROR_FAIL;
2310 struct nds32 *nds32 = target_to_nds32(target);
2311 uint32_t value_ir6;
2312 uint32_t syscall_id;
2314 if (nds32->hit_syscall == false)
2315 return ERROR_FAIL;
2317 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2318 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2319 nds32->active_syscall_id = syscall_id;
2321 LOG_DEBUG("hit syscall ID: 0x%x", syscall_id);
2323 /* free previous identifier storage */
2324 if (NULL != fileio_info->identifier) {
2325 free(fileio_info->identifier);
2326 fileio_info->identifier = NULL;
2329 switch (syscall_id) {
2330 case NDS32_SYSCALL_EXIT:
2331 fileio_info->identifier = (char *)malloc(5);
2332 sprintf(fileio_info->identifier, "exit");
2333 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2334 break;
2335 case NDS32_SYSCALL_OPEN:
2337 uint8_t filename[256];
2338 fileio_info->identifier = (char *)malloc(5);
2339 sprintf(fileio_info->identifier, "open");
2340 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2341 /* reserve fileio_info->param_2 for length of path */
2342 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2343 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_4));
2345 target->type->read_buffer(target, fileio_info->param_1,
2346 256, filename);
2347 fileio_info->param_2 = strlen((char *)filename) + 1;
2349 break;
2350 case NDS32_SYSCALL_CLOSE:
2351 fileio_info->identifier = (char *)malloc(6);
2352 sprintf(fileio_info->identifier, "close");
2353 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2354 break;
2355 case NDS32_SYSCALL_READ:
2356 fileio_info->identifier = (char *)malloc(5);
2357 sprintf(fileio_info->identifier, "read");
2358 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2359 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2360 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2361 break;
2362 case NDS32_SYSCALL_WRITE:
2363 fileio_info->identifier = (char *)malloc(6);
2364 sprintf(fileio_info->identifier, "write");
2365 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2366 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2367 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2368 break;
2369 case NDS32_SYSCALL_LSEEK:
2370 fileio_info->identifier = (char *)malloc(6);
2371 sprintf(fileio_info->identifier, "lseek");
2372 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2373 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2374 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2375 break;
2376 case NDS32_SYSCALL_UNLINK:
2378 uint8_t filename[256];
2379 fileio_info->identifier = (char *)malloc(7);
2380 sprintf(fileio_info->identifier, "unlink");
2381 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2382 /* reserve fileio_info->param_2 for length of path */
2384 target->type->read_buffer(target, fileio_info->param_1,
2385 256, filename);
2386 fileio_info->param_2 = strlen((char *)filename) + 1;
2388 break;
2389 case NDS32_SYSCALL_RENAME:
2391 uint8_t filename[256];
2392 fileio_info->identifier = (char *)malloc(7);
2393 sprintf(fileio_info->identifier, "rename");
2394 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2395 /* reserve fileio_info->param_2 for length of old path */
2396 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2397 /* reserve fileio_info->param_4 for length of new path */
2399 target->type->read_buffer(target, fileio_info->param_1,
2400 256, filename);
2401 fileio_info->param_2 = strlen((char *)filename) + 1;
2403 target->type->read_buffer(target, fileio_info->param_3,
2404 256, filename);
2405 fileio_info->param_4 = strlen((char *)filename) + 1;
2407 break;
2408 case NDS32_SYSCALL_FSTAT:
2409 fileio_info->identifier = (char *)malloc(6);
2410 sprintf(fileio_info->identifier, "fstat");
2411 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2412 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2413 break;
2414 case NDS32_SYSCALL_STAT:
2416 uint8_t filename[256];
2417 fileio_info->identifier = (char *)malloc(5);
2418 sprintf(fileio_info->identifier, "stat");
2419 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2420 /* reserve fileio_info->param_2 for length of path */
2421 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2423 target->type->read_buffer(target, fileio_info->param_1,
2424 256, filename);
2425 fileio_info->param_2 = strlen((char *)filename) + 1;
2427 break;
2428 case NDS32_SYSCALL_GETTIMEOFDAY:
2429 fileio_info->identifier = (char *)malloc(13);
2430 sprintf(fileio_info->identifier, "gettimeofday");
2431 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2432 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2433 break;
2434 case NDS32_SYSCALL_ISATTY:
2435 fileio_info->identifier = (char *)malloc(7);
2436 sprintf(fileio_info->identifier, "isatty");
2437 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2438 break;
2439 case NDS32_SYSCALL_SYSTEM:
2441 uint8_t command[256];
2442 fileio_info->identifier = (char *)malloc(7);
2443 sprintf(fileio_info->identifier, "system");
2444 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2445 /* reserve fileio_info->param_2 for length of command string */
2447 target->type->read_buffer(target, fileio_info->param_1,
2448 256, command);
2449 fileio_info->param_2 = strlen((char *)command) + 1;
2451 break;
2452 case NDS32_SYSCALL_ERRNO:
2453 fileio_info->identifier = (char *)malloc(6);
2454 sprintf(fileio_info->identifier, "errno");
2455 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2456 break;
2457 default:
2458 fileio_info->identifier = (char *)malloc(8);
2459 sprintf(fileio_info->identifier, "unknown");
2460 break;
2463 return ERROR_OK;
2466 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2468 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x, ctrl_c: %s",
2469 retcode, fileio_errno, ctrl_c ? "true" : "false");
2471 struct nds32 *nds32 = target_to_nds32(target);
2473 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2475 nds32->virtual_hosting_errno = fileio_errno;
2476 nds32->virtual_hosting_ctrl_c = ctrl_c;
2477 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2479 return ERROR_OK;
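/* Simple PC-sampling profiler: the adapter samples the PC register about
 * every 10 ms, for up to seconds * 100 samples (capped at max_num_samples),
 * after which the register cache is invalidated. */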
2482 int nds32_profiling(struct target *target, uint32_t *samples,
2483 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2485 /* sample $PC every 10 milliseconds */
2486 uint32_t iteration = seconds * 100;
2487 struct aice_port_s *aice = target_to_aice(target);
2488 struct nds32 *nds32 = target_to_nds32(target);
2490 if (max_num_samples < iteration)
2491 iteration = max_num_samples;
2493 int pc_regnum = nds32->register_map(nds32, PC);
2494 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2496 register_cache_invalidate(nds32->core_cache);
2498 return ERROR_OK;
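/* Write-back path for gdb file-I/O results.  For fstat/stat and gettimeofday
 * replies, the gdb-side (big-endian protocol) structure is repacked byte by
 * byte into the target's own struct layout (byte-swapped, assuming a
 * little-endian core) before being written out; all other writes go through
 * unchanged. */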
2501 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2502 uint32_t size, const uint8_t *buffer)
2504 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2505 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2506 /* If doing GDB file-I/O, target should convert 'struct stat'
2507 * from gdb-format to target-format */
2508 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2509 /* st_dev 2 */
2510 stat_buffer[0] = buffer[3];
2511 stat_buffer[1] = buffer[2];
2512 /* st_ino 2 */
2513 stat_buffer[2] = buffer[7];
2514 stat_buffer[3] = buffer[6];
2515 /* st_mode 4 */
2516 stat_buffer[4] = buffer[11];
2517 stat_buffer[5] = buffer[10];
2518 stat_buffer[6] = buffer[9];
2519 stat_buffer[7] = buffer[8];
2520 /* st_nlink 2 */
2521 stat_buffer[8] = buffer[15];
2522 stat_buffer[9] = buffer[16];
2523 /* st_uid 2 */
2524 stat_buffer[10] = buffer[19];
2525 stat_buffer[11] = buffer[18];
2526 /* st_gid 2 */
2527 stat_buffer[12] = buffer[23];
2528 stat_buffer[13] = buffer[22];
2529 /* st_rdev 2 */
2530 stat_buffer[14] = buffer[27];
2531 stat_buffer[15] = buffer[26];
2532 /* st_size 4 */
2533 stat_buffer[16] = buffer[35];
2534 stat_buffer[17] = buffer[34];
2535 stat_buffer[18] = buffer[33];
2536 stat_buffer[19] = buffer[32];
2537 /* st_atime 4 */
2538 stat_buffer[20] = buffer[55];
2539 stat_buffer[21] = buffer[54];
2540 stat_buffer[22] = buffer[53];
2541 stat_buffer[23] = buffer[52];
2542 /* st_spare1 4 */
2543 stat_buffer[24] = 0;
2544 stat_buffer[25] = 0;
2545 stat_buffer[26] = 0;
2546 stat_buffer[27] = 0;
2547 /* st_mtime 4 */
2548 stat_buffer[28] = buffer[59];
2549 stat_buffer[29] = buffer[58];
2550 stat_buffer[30] = buffer[57];
2551 stat_buffer[31] = buffer[56];
2552 /* st_spare2 4 */
2553 stat_buffer[32] = 0;
2554 stat_buffer[33] = 0;
2555 stat_buffer[34] = 0;
2556 stat_buffer[35] = 0;
2557 /* st_ctime 4 */
2558 stat_buffer[36] = buffer[63];
2559 stat_buffer[37] = buffer[62];
2560 stat_buffer[38] = buffer[61];
2561 stat_buffer[39] = buffer[60];
2562 /* st_spare3 4 */
2563 stat_buffer[40] = 0;
2564 stat_buffer[41] = 0;
2565 stat_buffer[42] = 0;
2566 stat_buffer[43] = 0;
2567 /* st_blksize 4 */
2568 stat_buffer[44] = buffer[43];
2569 stat_buffer[45] = buffer[42];
2570 stat_buffer[46] = buffer[41];
2571 stat_buffer[47] = buffer[40];
2572 /* st_blocks 4 */
2573 stat_buffer[48] = buffer[51];
2574 stat_buffer[49] = buffer[50];
2575 stat_buffer[50] = buffer[49];
2576 stat_buffer[51] = buffer[48];
2577 /* st_spare4 8 */
2578 stat_buffer[52] = 0;
2579 stat_buffer[53] = 0;
2580 stat_buffer[54] = 0;
2581 stat_buffer[55] = 0;
2582 stat_buffer[56] = 0;
2583 stat_buffer[57] = 0;
2584 stat_buffer[58] = 0;
2585 stat_buffer[59] = 0;
2587 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2588 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2589 /* If doing GDB file-I/O, target should convert 'struct timeval'
2590 * from gdb-format to target-format */
2591 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2592 timeval_buffer[0] = buffer[3];
2593 timeval_buffer[1] = buffer[2];
2594 timeval_buffer[2] = buffer[1];
2595 timeval_buffer[3] = buffer[0];
2596 timeval_buffer[4] = buffer[11];
2597 timeval_buffer[5] = buffer[10];
2598 timeval_buffer[6] = buffer[9];
2599 timeval_buffer[7] = buffer[8];
2601 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2604 return nds32_write_buffer(nds32->target, address, size, buffer);
2607 int nds32_reset_halt(struct nds32 *nds32)
2609 LOG_INFO("reset halt as init");
2611 struct aice_port_s *aice = target_to_aice(nds32->target);
2612 aice_assert_srst(aice, AICE_RESET_HOLD);
2614 return ERROR_OK;