zy1000: keep up with new command registration stuff
[openocd/genbsdl.git] / src / target / armv4_5.c
blob b5e33ff5466fdf2ec010349c24e2db57830c97e0
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Oyvind Harboe                                   *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

30 #include "armv4_5.h"
31 #include "arm_jtag.h"
32 #include "breakpoints.h"
33 #include "arm_disassembler.h"
34 #include "binarybuffer.h"
35 #include "algorithm.h"
36 #include "register.h"
/* offsets into armv4_5 core register cache */
enum {
//	ARMV4_5_CPSR = 31,
	ARMV4_5_SPSR_FIQ = 32,
	ARMV4_5_SPSR_IRQ = 33,
	ARMV4_5_SPSR_SVC = 34,
	ARMV4_5_SPSR_ABT = 35,
	ARMV4_5_SPSR_UND = 36,
	ARM_SPSR_MON = 39,
};

static const uint8_t arm_usr_indices[17] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
};

static const uint8_t arm_fiq_indices[8] = {
	16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
};

static const uint8_t arm_irq_indices[3] = {
	23, 24, ARMV4_5_SPSR_IRQ,
};

static const uint8_t arm_svc_indices[3] = {
	25, 26, ARMV4_5_SPSR_SVC,
};

static const uint8_t arm_abt_indices[3] = {
	27, 28, ARMV4_5_SPSR_ABT,
};

static const uint8_t arm_und_indices[3] = {
	29, 30, ARMV4_5_SPSR_UND,
};

static const uint8_t arm_mon_indices[3] = {
	37, 38, ARM_SPSR_MON,
};

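/* For example, arm_irq_indices above names the cache slots holding the
 * IRQ-mode shadow registers and SPSR_irq (slots 23, 24 and 33); the
 * "arm reg" command below walks these per-mode index lists when it
 * prints each register bank.
 */
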
static const struct {
	const char *name;
	unsigned short psr;
	/* For user and system modes, these list indices for all registers.
	 * otherwise they're just indices for the shadow registers and SPSR.
	 */
	unsigned short n_indices;
	const uint8_t *indices;
} arm_mode_data[] = {
	/* Seven modes are standard from ARM7 on. "System" and "User" share
	 * the same registers; other modes shadow from 3 to 8 registers.
	 */
	{
		.name = "User",
		.psr = ARMV4_5_MODE_USR,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	{
		.name = "FIQ",
		.psr = ARMV4_5_MODE_FIQ,
		.n_indices = ARRAY_SIZE(arm_fiq_indices),
		.indices = arm_fiq_indices,
	},
	{
		.name = "Supervisor",
		.psr = ARMV4_5_MODE_SVC,
		.n_indices = ARRAY_SIZE(arm_svc_indices),
		.indices = arm_svc_indices,
	},
	{
		.name = "Abort",
		.psr = ARMV4_5_MODE_ABT,
		.n_indices = ARRAY_SIZE(arm_abt_indices),
		.indices = arm_abt_indices,
	},
	{
		.name = "IRQ",
		.psr = ARMV4_5_MODE_IRQ,
		.n_indices = ARRAY_SIZE(arm_irq_indices),
		.indices = arm_irq_indices,
	},
	{
		.name = "Undefined instruction",
		.psr = ARMV4_5_MODE_UND,
		.n_indices = ARRAY_SIZE(arm_und_indices),
		.indices = arm_und_indices,
	},
	{
		.name = "System",
		.psr = ARMV4_5_MODE_SYS,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	/* TrustZone "Security Extensions" add a secure monitor mode.
	 * This is distinct from a "debug monitor" which can support
	 * non-halting debug, in conjunction with some debuggers.
	 */
	{
		.name = "Secure Monitor",
		.psr = ARM_MODE_MON,
		.n_indices = ARRAY_SIZE(arm_mon_indices),
		.indices = arm_mon_indices,
	},
};

/** Map PSR mode bits to the name of an ARM processor operating mode. */
const char *arm_mode_name(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return arm_mode_data[i].name;
	}
	LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
	return "UNRECOGNIZED";
}

/** Return true iff the parameter denotes a valid ARM processor mode. */
bool is_arm_mode(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return true;
	}
	return false;
}

/** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
int armv4_5_mode_to_number(enum armv4_5_mode mode)
{
	switch (mode) {
	case ARMV4_5_MODE_ANY:
		/* map MODE_ANY to user mode */
	case ARMV4_5_MODE_USR:
		return 0;
	case ARMV4_5_MODE_FIQ:
		return 1;
	case ARMV4_5_MODE_IRQ:
		return 2;
	case ARMV4_5_MODE_SVC:
		return 3;
	case ARMV4_5_MODE_ABT:
		return 4;
	case ARMV4_5_MODE_UND:
		return 5;
	case ARMV4_5_MODE_SYS:
		return 6;
	case ARM_MODE_MON:
		return 7;
	default:
		LOG_ERROR("invalid mode value encountered %d", mode);
		return -1;
	}
}

/** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
enum armv4_5_mode armv4_5_number_to_mode(int number)
{
	switch (number) {
	case 0:
		return ARMV4_5_MODE_USR;
	case 1:
		return ARMV4_5_MODE_FIQ;
	case 2:
		return ARMV4_5_MODE_IRQ;
	case 3:
		return ARMV4_5_MODE_SVC;
	case 4:
		return ARMV4_5_MODE_ABT;
	case 5:
		return ARMV4_5_MODE_UND;
	case 6:
		return ARMV4_5_MODE_SYS;
	case 7:
		return ARM_MODE_MON;
	default:
		LOG_ERROR("mode index out of bounds %d", number);
		return ARMV4_5_MODE_ANY;
	}
}

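/* Worked example: armv4_5_mode_to_number(ARMV4_5_MODE_SVC) returns 3,
 * which selects the SVC row of armv4_5_core_reg_map[] below, and
 * armv4_5_number_to_mode(3) maps back to ARMV4_5_MODE_SVC.  The two
 * functions are inverses over the eight supported modes.
 */
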
char* armv4_5_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};

/* Templates for ARM core registers.
 *
 * NOTE:  offsets in this table are coupled to the arm_mode_data
 * table above, the armv4_5_core_reg_map array below, and also to
 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
 */
static const struct {
	/* The name is used for e.g. the "regs" command. */
	const char *name;

	/* The {cookie, mode} tuple uniquely identifies one register.
	 * In a given mode, cookies 0..15 map to registers R0..R15,
	 * with R13..R15 usually called SP, LR, PC.
	 *
	 * MODE_ANY is used as *input* to the mapping, and indicates
	 * various special cases (sigh) and errors.
	 *
	 * Cookie 16 is (currently) confusing, since it indicates
	 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
	 * (Exception modes have both CPSR and SPSR registers ...)
	 */
	unsigned cookie;
	enum armv4_5_mode mode;
} arm_core_regs[] = {
	/* IMPORTANT:  we guarantee that the first eight cached registers
	 * correspond to r0..r7, and the fifteenth to PC, so that callers
	 * don't need to map them.
	 */
	{ .name = "r0", .cookie = 0, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r1", .cookie = 1, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r2", .cookie = 2, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r3", .cookie = 3, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r4", .cookie = 4, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r5", .cookie = 5, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r6", .cookie = 6, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r7", .cookie = 7, .mode = ARMV4_5_MODE_ANY, },

	/* NOTE:  regs 8..12 might be shadowed by FIQ ... flagging
	 * them as MODE_ANY creates special cases.  (ANY means
	 * "not mapped" elsewhere; here it's "everything but FIQ".)
	 */
	{ .name = "r8", .cookie = 8, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r9", .cookie = 9, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r10", .cookie = 10, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r11", .cookie = 11, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "r12", .cookie = 12, .mode = ARMV4_5_MODE_ANY, },

	/* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
	{ .name = "sp_usr", .cookie = 13, .mode = ARMV4_5_MODE_USR, },
	{ .name = "lr_usr", .cookie = 14, .mode = ARMV4_5_MODE_USR, },

	/* guaranteed to be at index 15 */
	{ .name = "pc", .cookie = 15, .mode = ARMV4_5_MODE_ANY, },

	{ .name = "r8_fiq", .cookie = 8, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "r9_fiq", .cookie = 9, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "r10_fiq", .cookie = 10, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "r11_fiq", .cookie = 11, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "r12_fiq", .cookie = 12, .mode = ARMV4_5_MODE_FIQ, },

	{ .name = "lr_fiq", .cookie = 13, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "sp_fiq", .cookie = 14, .mode = ARMV4_5_MODE_FIQ, },

	{ .name = "lr_irq", .cookie = 13, .mode = ARMV4_5_MODE_IRQ, },
	{ .name = "sp_irq", .cookie = 14, .mode = ARMV4_5_MODE_IRQ, },

	{ .name = "lr_svc", .cookie = 13, .mode = ARMV4_5_MODE_SVC, },
	{ .name = "sp_svc", .cookie = 14, .mode = ARMV4_5_MODE_SVC, },

	{ .name = "lr_abt", .cookie = 13, .mode = ARMV4_5_MODE_ABT, },
	{ .name = "sp_abt", .cookie = 14, .mode = ARMV4_5_MODE_ABT, },

	{ .name = "lr_und", .cookie = 13, .mode = ARMV4_5_MODE_UND, },
	{ .name = "sp_und", .cookie = 14, .mode = ARMV4_5_MODE_UND, },

	{ .name = "cpsr", .cookie = 16, .mode = ARMV4_5_MODE_ANY, },
	{ .name = "spsr_fiq", .cookie = 16, .mode = ARMV4_5_MODE_FIQ, },
	{ .name = "spsr_irq", .cookie = 16, .mode = ARMV4_5_MODE_IRQ, },
	{ .name = "spsr_svc", .cookie = 16, .mode = ARMV4_5_MODE_SVC, },
	{ .name = "spsr_abt", .cookie = 16, .mode = ARMV4_5_MODE_ABT, },
	{ .name = "spsr_und", .cookie = 16, .mode = ARMV4_5_MODE_UND, },

	{ .name = "lr_mon", .cookie = 13, .mode = ARM_MODE_MON, },
	{ .name = "sp_mon", .cookie = 14, .mode = ARM_MODE_MON, },
	{ .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
};

/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};

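/* Example lookups: armv4_5_core_reg_map[1][8] == 16, so in FIQ mode
 * register r8 resolves to the cache slot holding r8_fiq; in the
 * USR and SYS rows, index 16 maps to slot 31, the shared CPSR entry.
 */
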
/**
 * Configures host-side ARM records to reflect the specified CPSR.
 * Later, code can use arm_reg_current() to map register numbers
 * according to how they are exposed by this mode.
 */
void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
{
	enum armv4_5_mode mode = cpsr & 0x1f;
	int num;

	/* NOTE:  this may be called very early, before the register
	 * cache is set up.  We can't defend against many errors, in
	 * particular against CPSRs that aren't valid *here* ...
	 */
	if (arm->cpsr) {
		buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
		arm->cpsr->valid = 1;
		arm->cpsr->dirty = 0;
	}

	arm->core_mode = mode;

	/* mode_to_number() warned; set up a somewhat-sane mapping */
	num = armv4_5_mode_to_number(mode);
	if (num < 0) {
		mode = ARMV4_5_MODE_USR;
		num = 0;
	}

	arm->map = &armv4_5_core_reg_map[num][0];
	arm->spsr = (mode == ARMV4_5_MODE_USR || mode == ARMV4_5_MODE_SYS)
			? NULL
			: arm->core_cache->reg_list + arm->map[16];

	/* Older ARMs won't have the J bit */
	enum armv4_5_state state;

	if (cpsr & (1 << 5)) {	/* T */
		if (cpsr & (1 << 24)) {	/* J */
			LOG_WARNING("ThumbEE -- incomplete support");
			state = ARM_STATE_THUMB_EE;
		} else
			state = ARMV4_5_STATE_THUMB;
	} else {
		if (cpsr & (1 << 24)) {	/* J */
			LOG_ERROR("Jazelle state handling is BROKEN!");
			state = ARMV4_5_STATE_JAZELLE;
		} else
			state = ARMV4_5_STATE_ARM;
	}
	arm->core_state = state;

	LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
			arm_mode_name(mode),
			armv4_5_state_strings[arm->core_state]);
}

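/* Typical use (illustrative sketch, not a call site in this file):
 * a core's halt handler reads CPSR from the target and refreshes the
 * host-side mapping before touching banked registers, e.g.
 *
 *	uint32_t cpsr = ...;		// value read from the target's CPSR
 *	arm_set_cpsr(arm, cpsr);
 *	struct reg *lr = arm_reg_current(arm, 14);
 */
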
/**
 * Returns handle to the register currently mapped to a given number.
 * Someone must have called arm_set_cpsr() before.
 *
 * \param arm This core's state and registers are used.
 * \param regnum From 0..15 corresponding to R0..R14 and PC.
 *	Note that R0..R7 don't require mapping; you may access those
 *	as the first eight entries in the register cache.  Likewise
 *	R15 (PC) doesn't need mapping; you may also access it directly.
 *	However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
 *	CPSR (arm->cpsr) is also not mapped.
 */
struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	if (regnum > 16)
		return NULL;

	r = arm->core_cache->reg_list + arm->map[regnum];

	/* e.g. invalid CPSR said "secure monitor" mode on a core
	 * that doesn't support it...
	 */
	if (!r) {
		LOG_ERROR("Invalid CPSR mode");
		r = arm->core_cache->reg_list + regnum;
	}

	return r;
}

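/* Caller sketch (hypothetical, for illustration only): to change the
 * banked LR of the current mode, update the cached value and mark the
 * register dirty so the core-specific code flushes it on resume:
 *
 *	struct reg *r = arm_reg_current(arm, 14);
 *	buf_set_u32(r->value, 0, 32, new_lr);
 *	r->valid = 1;
 *	r->dirty = 1;
 */
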
static const uint8_t arm_gdb_dummy_fp_value[12];

/**
 * Dummy FPA registers are required to support GDB on ARM.
 * Register packets require eight obsolete FPA register values.
 * Modern ARM cores use Vector Floating Point (VFP), if they
 * have any floating point support.  VFP is not FPA-compatible.
 */
struct reg arm_gdb_dummy_fp_reg =
{
	.name = "GDB dummy FPA register",
	.value = (uint8_t *) arm_gdb_dummy_fp_value,
	.valid = 1,
	.size = 96,
};

static const uint8_t arm_gdb_dummy_fps_value[4];

/**
 * Dummy FPA status registers are required to support GDB on ARM.
 * Register packets require an obsolete FPA status register.
 */
struct reg arm_gdb_dummy_fps_reg =
{
	.name = "GDB dummy FPA status register",
	.value = (uint8_t *) arm_gdb_dummy_fps_value,
	.valid = 1,
	.size = 32,
};

static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
static void arm_gdb_dummy_init(void)
{
	register_init_dummy(&arm_gdb_dummy_fp_reg);
	register_init_dummy(&arm_gdb_dummy_fps_reg);
}

static int armv4_5_get_core_reg(struct reg *reg)
{
	int retval;
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
	if (retval == ERROR_OK) {
		reg->valid = 1;
		reg->dirty = 0;
	}

	return retval;
}

static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;
	struct arm *armv4_5_target = target_to_armv4_5(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Except for CPSR, the "reg" command exposes a writeback model
	 * for the register cache.
	 */
	if (reg == armv4_5_target->cpsr) {
		arm_set_cpsr(armv4_5_target, value);

		/* Older cores need help to be in ARM mode during halt
		 * mode debug, so we clear the J and T bits if we flush.
		 * For newer cores (v6/v7a/v7r) we don't need that, but
		 * it won't hurt since CPSR is always flushed anyway.
		 */
		if (armv4_5_target->core_mode !=
				(enum armv4_5_mode)(value & 0x1f)) {
			LOG_DEBUG("changing ARM core mode to '%s'",
					arm_mode_name(value & 0x1f));
			value &= ~((1 << 24) | (1 << 5));
			armv4_5_target->write_core_reg(target, reg,
					16, ARMV4_5_MODE_ANY, value);
		}
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		reg->valid = 1;
	}
	reg->dirty = 1;

	return ERROR_OK;
}

static const struct reg_arch_type arm_reg_type = {
	.get = armv4_5_get_core_reg,
	.set = armv4_5_set_core_reg,
};

struct reg_cache* armv4_5_build_reg_cache(struct target *target, struct arm *armv4_5_common)
{
	int num_regs = ARRAY_SIZE(arm_core_regs);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
	int i;

	if (!cache || !reg_list || !arch_info) {
		free(cache);
		free(reg_list);
		free(arch_info);
		return NULL;
	}

	cache->name = "ARM registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = 0;

	for (i = 0; i < num_regs; i++)
	{
		/* Skip registers this core doesn't expose */
		if (arm_core_regs[i].mode == ARM_MODE_MON
				&& armv4_5_common->core_type != ARM_MODE_MON)
			continue;

		/* REVISIT handle Cortex-M, which only shadows R13/SP */

		arch_info[i].num = arm_core_regs[i].cookie;
		arch_info[i].mode = arm_core_regs[i].mode;
		arch_info[i].target = target;
		arch_info[i].armv4_5_common = armv4_5_common;

		reg_list[i].name = (char *) arm_core_regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = &arch_info[i].value;
		reg_list[i].type = &arm_reg_type;
		reg_list[i].arch_info = &arch_info[i];

		cache->num_regs++;
	}

	armv4_5_common->cpsr = reg_list + ARMV4_5_CPSR;
	armv4_5_common->core_cache = cache;
	return cache;
}

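/* Sketch of the usual setup sequence in a core driver (illustrative;
 * the real call sites live in the per-core files):
 *
 *	armv4_5_init_arch_info(target, armv4_5);	// hook struct arm into target
 *	armv4_5_build_reg_cache(target, armv4_5);	// then build the cache
 */
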
int armv4_5_arch_state(struct target *target)
{
	struct arm *armv4_5 = target_to_armv4_5(target);

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_FAIL;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\ncpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "",
			armv4_5_state_strings[armv4_5->core_state],
			Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name,
			arm_mode_name(armv4_5->core_mode),
			buf_get_u32(armv4_5->cpsr->value, 0, 32),
			buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	return ERROR_OK;
}

#define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
		cache->reg_list[armv4_5_core_reg_map[mode][num]]

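/* e.g. ARMV4_5_CORE_REG_MODENUM(cache, 1, 8) expands to
 * cache->reg_list[armv4_5_core_reg_map[1][8]], i.e. the cache slot
 * that holds r8_fiq.
 */
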
COMMAND_HANDLER(handle_armv4_5_reg_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_armv4_5(target);
	unsigned num_regs;
	struct reg *regs;

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "error: target must be halted for register accesses");
		return ERROR_FAIL;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	if (!armv4_5->full_context) {
		command_print(CMD_CTX, "error: target doesn't support %s",
				CMD_NAME);
		return ERROR_FAIL;
	}

	num_regs = armv4_5->core_cache->num_regs;
	regs = armv4_5->core_cache->reg_list;

	for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
		const char *name;
		char *sep = "\n";
		char *shadow = "";

		/* label this bank of registers (or shadows) */
		switch (arm_mode_data[mode].psr) {
		case ARMV4_5_MODE_SYS:
			continue;
		case ARMV4_5_MODE_USR:
			name = "System and User";
			sep = "";
			break;
		case ARM_MODE_MON:
			if (armv4_5->core_type != ARM_MODE_MON)
				continue;
			/* FALLTHROUGH */
		default:
			name = arm_mode_data[mode].name;
			shadow = "shadow ";
			break;
		}
		command_print(CMD_CTX, "%s%s mode %sregisters",
				sep, name, shadow);

		/* display N rows of up to 4 registers each */
		for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
			char output[80];
			int output_len = 0;

			for (unsigned j = 0; j < 4; j++, i++) {
				uint32_t value;
				struct reg *reg = regs;

				if (i >= arm_mode_data[mode].n_indices)
					break;

				reg += arm_mode_data[mode].indices[i];

				/* REVISIT be smarter about faults... */
				if (!reg->valid)
					armv4_5->full_context(target);

				value = buf_get_u32(reg->value, 0, 32);
				output_len += snprintf(output + output_len,
						sizeof(output) - output_len,
						"%8s: %8.8" PRIx32 " ",
						reg->name, value);
			}
			command_print(CMD_CTX, "%s", output);
		}
	}

	return ERROR_OK;
}

COMMAND_HANDLER(handle_armv4_5_core_state_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_armv4_5(target);

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (CMD_ARGC > 0)
	{
		if (strcmp(CMD_ARGV[0], "arm") == 0)
		{
			armv4_5->core_state = ARMV4_5_STATE_ARM;
		}
		if (strcmp(CMD_ARGV[0], "thumb") == 0)
		{
			armv4_5->core_state = ARMV4_5_STATE_THUMB;
		}
	}

	command_print(CMD_CTX, "core state: %s", armv4_5_state_strings[armv4_5->core_state]);

	return ERROR_OK;
}

COMMAND_HANDLER(handle_armv4_5_disassemble_command)
{
	int retval = ERROR_OK;
	struct target *target = get_current_target(CMD_CTX);
	struct arm *arm = target ? target_to_arm(target) : NULL;
	uint32_t address;
	int count = 1;
	int thumb = 0;

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	switch (CMD_ARGC) {
	case 3:
		if (strcmp(CMD_ARGV[2], "thumb") != 0)
			goto usage;
		thumb = 1;
		/* FALL THROUGH */
	case 2:
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
		/* FALL THROUGH */
	case 1:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
		if (address & 0x01) {
			if (!thumb) {
				command_print(CMD_CTX, "Disassemble as Thumb");
				thumb = 1;
			}
			address &= ~1;
		}
		break;
	default:
usage:
		command_print(CMD_CTX,
			"usage: arm disassemble <address> [<count> ['thumb']]");
		count = 0;
		retval = ERROR_FAIL;
	}

	while (count-- > 0) {
		struct arm_instruction cur_instruction;

		if (thumb) {
			/* Always use Thumb2 disassembly for best handling
			 * of 32-bit BL/BLX, and to work with newer cores
			 * (some ARMv6, all ARMv7) that use Thumb2.
			 */
			retval = thumb2_opcode(target, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		} else {
			uint32_t opcode;

			retval = target_read_u32(target, address, &opcode);
			if (retval != ERROR_OK)
				break;
			retval = arm_evaluate_opcode(opcode, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		}
		command_print(CMD_CTX, "%s", cur_instruction.text);
		address += cur_instruction.instruction_size;
	}

	return retval;
}

static const struct command_registration arm_exec_command_handlers[] = {
	{
		.name = "reg",
		.handler = &handle_armv4_5_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display ARM core registers",
	},
	{
		.name = "core_state",
		.handler = &handle_armv4_5_core_state_command,
		.mode = COMMAND_EXEC,
		.usage = "<arm | thumb>",
		.help = "display/change ARM core state",
	},
	{
		.name = "disassemble",
		.handler = &handle_armv4_5_disassemble_command,
		.mode = COMMAND_EXEC,
		.usage = "<address> [<count> ['thumb']]",
		.help = "disassemble instructions",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration arm_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM command group",
		.chain = arm_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

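/* From the OpenOCD command line these register as, for example:
 *
 *	arm reg
 *	arm core_state thumb
 *	arm disassemble 0x8000 10 thumb
 *
 * (the address and count above are illustrative).
 */
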
int armv4_5_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size)
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	int i;

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	*reg_list_size = 26;
	*reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));

	for (i = 0; i < 16; i++)
		(*reg_list)[i] = arm_reg_current(armv4_5, i);

	for (i = 16; i < 24; i++)
		(*reg_list)[i] = &arm_gdb_dummy_fp_reg;

	(*reg_list)[24] = &arm_gdb_dummy_fps_reg;
	(*reg_list)[25] = armv4_5->cpsr;

	return ERROR_OK;
}

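/* The 26-entry list built above follows GDB's ARM register numbering:
 * r0..r15, eight dummy FPA registers, the dummy FPA status register,
 * and CPSR.
 */
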
/* wait for execution to complete and check exit point */
static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	int retval;
	struct arm *armv4_5 = target_to_armv4_5(target);

	if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
	{
		return retval;
	}
	if (target->state != TARGET_HALTED)
	{
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
		if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
		{
			return retval;
		}
		return ERROR_TARGET_TIMEOUT;
	}

	/* fast exit: ARMv5+ code can use BKPT */
	if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
				0, 32) != exit_point)
	{
		LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
			buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
		return ERROR_TARGET_TIMEOUT;
	}

	return ERROR_OK;
}

int armv4_5_run_algorithm_inner(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info,
		int (*run_it)(struct target *target, uint32_t exit_point,
				int timeout_ms, void *arch_info))
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	struct armv4_5_algorithm *armv4_5_algorithm_info = arch_info;
	enum armv4_5_state core_state = armv4_5->core_state;
	uint32_t context[17];
	uint32_t cpsr;
	int exit_breakpoint_size = 0;
	int i;
	int retval = ERROR_OK;

	LOG_DEBUG("Running algorithm");

	if (armv4_5_algorithm_info->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("current target isn't an ARMV4/5 target");
		return ERROR_TARGET_INVALID;
	}

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	/* armv5 and later can terminate with BKPT instruction; less overhead */
	if (!exit_point && armv4_5->is_armv4)
	{
		LOG_ERROR("ARMv4 target needs HW breakpoint location");
		return ERROR_FAIL;
	}

	/* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
	 * they'll be restored later.
	 */
	for (i = 0; i <= 16; i++)
	{
		struct reg *r;

		r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5_algorithm_info->core_mode, i);
		if (!r->valid)
			armv4_5->read_core_reg(target, r, i,
					armv4_5_algorithm_info->core_mode);
		context[i] = buf_get_u32(r->value, 0, 32);
	}
	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);

	for (i = 0; i < num_mem_params; i++)
	{
		if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
		if (!reg)
		{
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if (reg->size != reg_params[i].size)
		{
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	armv4_5->core_state = armv4_5_algorithm_info->core_state;
	if (armv4_5->core_state == ARMV4_5_STATE_ARM)
		exit_breakpoint_size = 4;
	else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
		exit_breakpoint_size = 2;
	else
	{
		LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
		return ERROR_INVALID_ARGUMENTS;
	}

	if (armv4_5_algorithm_info->core_mode != ARMV4_5_MODE_ANY)
	{
		LOG_DEBUG("setting core_mode: 0x%2.2x",
				armv4_5_algorithm_info->core_mode);
		buf_set_u32(armv4_5->cpsr->value, 0, 5,
				armv4_5_algorithm_info->core_mode);
		armv4_5->cpsr->dirty = 1;
		armv4_5->cpsr->valid = 1;
	}

	/* terminate using a hardware or (ARMv5+) software breakpoint */
	if (exit_point && (retval = breakpoint_add(target, exit_point,
				exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
	{
		LOG_ERROR("can't add HW breakpoint to terminate algorithm");
		return ERROR_TARGET_FAILURE;
	}

	if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
	{
		return retval;
	}
	int retvaltemp;
	retval = run_it(target, exit_point, timeout_ms, arch_info);

	if (exit_point)
		breakpoint_remove(target, exit_point);

	if (retval != ERROR_OK)
		return retval;

	for (i = 0; i < num_mem_params; i++)
	{
		if (mem_params[i].direction != PARAM_OUT)
			if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
			{
				retval = retvaltemp;
			}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		if (reg_params[i].direction != PARAM_OUT)
		{
			struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
			if (!reg)
			{
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			if (reg->size != reg_params[i].size)
			{
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
		}
	}

	/* restore everything we saved before (17 or 18 registers) */
	for (i = 0; i <= 16; i++)
	{
		uint32_t regvalue;
		regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
		if (regvalue != context[i])
		{
			LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).name, context[i]);
			buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32, context[i]);
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid = 1;
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).dirty = 1;
		}
	}

	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	armv4_5->core_state = core_state;

	return retval;
}

int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
}

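/* arm_checksum_memory() and arm_blank_check_memory() below show the
 * usual call pattern: load a code stub into a working area, describe
 * the parameter registers with init_reg_param()/buf_set_u32(), then
 * hand the entry and exit addresses to target_run_algorithm().
 */
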
/**
 * Runs ARM code in the target to calculate a CRC32 checksum.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_checksum_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *checksum)
{
	struct working_area *crc_algorithm;
	struct armv4_5_algorithm armv4_5_info;
	struct reg_param reg_params[2];
	int retval;
	uint32_t i;

	static const uint32_t arm_crc_code[] = {
		0xE1A02000,		/* mov		r2, r0 */
		0xE3E00000,		/* mov		r0, #0xffffffff */
		0xE1A03001,		/* mov		r3, r1 */
		0xE3A04000,		/* mov		r4, #0 */
		0xEA00000B,		/* b		ncomp */
		/* nbyte: */
		0xE7D21004,		/* ldrb		r1, [r2, r4] */
		0xE59F7030,		/* ldr		r7, CRC32XOR */
		0xE0200C01,		/* eor		r0, r0, r1, asl 24 */
		0xE3A05000,		/* mov		r5, #0 */
		/* loop: */
		0xE3500000,		/* cmp		r0, #0 */
		0xE1A06080,		/* mov		r6, r0, asl #1 */
		0xE2855001,		/* add		r5, r5, #1 */
		0xE1A00006,		/* mov		r0, r6 */
		0xB0260007,		/* eorlt	r0, r6, r7 */
		0xE3550008,		/* cmp		r5, #8 */
		0x1AFFFFF8,		/* bne		loop */
		0xE2844001,		/* add		r4, r4, #1 */
		/* ncomp: */
		0xE1540003,		/* cmp		r4, r3 */
		0x1AFFFFF1,		/* bne		nbyte */
		/* end: */
		0xEAFFFFFE,		/* b		end */
		/* CRC32XOR: */
		0x04C11DB7		/* .word 0x04C11DB7 */
	};

	retval = target_alloc_working_area(target,
			sizeof(arm_crc_code), &crc_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
		retval = target_write_u32(target,
				crc_algorithm->address + i * sizeof(uint32_t),
				arm_crc_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
	armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
	armv4_5_info.core_state = ARMV4_5_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);

	buf_set_u32(reg_params[0].value, 0, 32, address);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	/* 20 second timeout/megabyte */
	int timeout = 20000 * (1 + (count / (1024 * 1024)));

	retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
			crc_algorithm->address,
			crc_algorithm->address + sizeof(arm_crc_code) - 8,
			timeout, &armv4_5_info);
	if (retval != ERROR_OK) {
		LOG_ERROR("error executing ARM crc algorithm");
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		target_free_working_area(target, crc_algorithm);
		return retval;
	}

	*checksum = buf_get_u32(reg_params[0].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);

	target_free_working_area(target, crc_algorithm);

	return ERROR_OK;
}

/**
 * Runs ARM code in the target to check whether a memory block holds
 * all ones.  NOR flash which has been erased, and thus may be written,
 * holds all ones.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_blank_check_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *blank)
{
	struct working_area *check_algorithm;
	struct reg_param reg_params[3];
	struct armv4_5_algorithm armv4_5_info;
	int retval;
	uint32_t i;

	static const uint32_t check_code[] = {
		/* loop: */
		0xe4d03001,		/* ldrb r3, [r0], #1 */
		0xe0022003,		/* and r2, r2, r3    */
		0xe2511001,		/* subs r1, r1, #1   */
		0x1afffffb,		/* bne loop          */
		/* end: */
		0xeafffffe		/* b end             */
	};

	/* make sure we have a working area */
	retval = target_alloc_working_area(target,
			sizeof(check_code), &check_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(check_code); i++) {
		retval = target_write_u32(target,
				check_algorithm->address
					+ i * sizeof(uint32_t),
				check_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
	armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
	armv4_5_info.core_state = ARMV4_5_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, address);

	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[2].value, 0, 32, 0xff);

	retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
			check_algorithm->address,
			check_algorithm->address + sizeof(check_code) - 4,
			10000, &armv4_5_info);
	if (retval != ERROR_OK) {
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		destroy_reg_param(&reg_params[2]);
		target_free_working_area(target, check_algorithm);
		return retval;
	}

	*blank = buf_get_u32(reg_params[2].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	destroy_reg_param(&reg_params[2]);

	target_free_working_area(target, check_algorithm);

	return ERROR_OK;
}

static int arm_full_context(struct target *target)
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	unsigned num_regs = armv4_5->core_cache->num_regs;
	struct reg *reg = armv4_5->core_cache->reg_list;
	int retval = ERROR_OK;

	for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
		if (reg->valid)
			continue;
		retval = armv4_5_get_core_reg(reg);
	}
	return retval;
}

int armv4_5_init_arch_info(struct target *target, struct arm *armv4_5)
{
	target->arch_info = armv4_5;
	armv4_5->target = target;

	armv4_5->common_magic = ARMV4_5_COMMON_MAGIC;
	arm_set_cpsr(armv4_5, ARMV4_5_MODE_USR);

	/* core_type may be overridden by subtype logic */
	armv4_5->core_type = ARMV4_5_MODE_ANY;

	/* default full_context() has no core-specific optimizations */
	if (!armv4_5->full_context && armv4_5->read_core_reg)
		armv4_5->full_context = arm_full_context;

	return ERROR_OK;
}

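/* Sketch of a core driver's init path (illustrative; the actual hooks
 * are installed by the per-core files such as arm7_9_common.c):
 *
 *	armv4_5->read_core_reg = ...;	// core-specific register accessors
 *	armv4_5->write_core_reg = ...;
 *	armv4_5_init_arch_info(target, armv4_5);
 *	armv4_5_build_reg_cache(target, armv4_5);
 */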