armv4_5: prevent segfault when gdb connects to an underinitialised target
[openocd.git] / src/target/armv4_5.c
blob 91830f57c90472931909bad1d4a4cfb735c1357e
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
31 #include "arm.h"
32 #include "armv4_5.h"
33 #include "arm_jtag.h"
34 #include "breakpoints.h"
35 #include "arm_disassembler.h"
36 #include <helper/binarybuffer.h>
37 #include "algorithm.h"
38 #include "register.h"
40 /* offsets into armv4_5 core register cache */
41 enum {
42 /* ARMV4_5_CPSR = 31, */
43 ARMV4_5_SPSR_FIQ = 32,
44 ARMV4_5_SPSR_IRQ = 33,
45 ARMV4_5_SPSR_SVC = 34,
46 ARMV4_5_SPSR_ABT = 35,
47 ARMV4_5_SPSR_UND = 36,
48 ARM_SPSR_MON = 39,
51 static const uint8_t arm_usr_indices[17] = {
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
55 static const uint8_t arm_fiq_indices[8] = {
56 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
59 static const uint8_t arm_irq_indices[3] = {
60 23, 24, ARMV4_5_SPSR_IRQ,
63 static const uint8_t arm_svc_indices[3] = {
64 25, 26, ARMV4_5_SPSR_SVC,
67 static const uint8_t arm_abt_indices[3] = {
68 27, 28, ARMV4_5_SPSR_ABT,
71 static const uint8_t arm_und_indices[3] = {
72 29, 30, ARMV4_5_SPSR_UND,
75 static const uint8_t arm_mon_indices[3] = {
76 37, 38, ARM_SPSR_MON,
79 static const struct {
80 const char *name;
81 unsigned short psr;
82 /* For user and system modes, these list indices for all registers.
83 * otherwise they're just indices for the shadow registers and SPSR.
85 unsigned short n_indices;
86 const uint8_t *indices;
87 } arm_mode_data[] = {
88 /* Seven modes are standard from ARM7 on. "System" and "User" share
89 * the same registers; other modes shadow from 3 to 8 registers.
92 .name = "User",
93 .psr = ARM_MODE_USR,
94 .n_indices = ARRAY_SIZE(arm_usr_indices),
95 .indices = arm_usr_indices,
98 .name = "FIQ",
99 .psr = ARM_MODE_FIQ,
100 .n_indices = ARRAY_SIZE(arm_fiq_indices),
101 .indices = arm_fiq_indices,
104 .name = "Supervisor",
105 .psr = ARM_MODE_SVC,
106 .n_indices = ARRAY_SIZE(arm_svc_indices),
107 .indices = arm_svc_indices,
110 .name = "Abort",
111 .psr = ARM_MODE_ABT,
112 .n_indices = ARRAY_SIZE(arm_abt_indices),
113 .indices = arm_abt_indices,
116 .name = "IRQ",
117 .psr = ARM_MODE_IRQ,
118 .n_indices = ARRAY_SIZE(arm_irq_indices),
119 .indices = arm_irq_indices,
122 .name = "Undefined instruction",
123 .psr = ARM_MODE_UND,
124 .n_indices = ARRAY_SIZE(arm_und_indices),
125 .indices = arm_und_indices,
128 .name = "System",
129 .psr = ARM_MODE_SYS,
130 .n_indices = ARRAY_SIZE(arm_usr_indices),
131 .indices = arm_usr_indices,
133 /* TrustZone "Security Extensions" add a secure monitor mode.
134 * This is distinct from a "debug monitor" which can support
135 * non-halting debug, in conjunction with some debuggers.
138 .name = "Secure Monitor",
139 .psr = ARM_MODE_MON,
140 .n_indices = ARRAY_SIZE(arm_mon_indices),
141 .indices = arm_mon_indices,
144 /* These special modes are currently only supported
145 * by ARMv6M and ARMv7M profiles */
147 .name = "Thread",
148 .psr = ARM_MODE_THREAD,
151 .name = "Thread (User)",
152 .psr = ARM_MODE_USER_THREAD,
155 .name = "Handler",
156 .psr = ARM_MODE_HANDLER,
160 /** Map PSR mode bits to the name of an ARM processor operating mode. */
161 const char *arm_mode_name(unsigned psr_mode)
163 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
164 if (arm_mode_data[i].psr == psr_mode)
165 return arm_mode_data[i].name;
167 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
168 return "UNRECOGNIZED";
171 /** Return true iff the parameter denotes a valid ARM processor mode. */
172 bool is_arm_mode(unsigned psr_mode)
174 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
175 if (arm_mode_data[i].psr == psr_mode)
176 return true;
178 return false;
181 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
182 int arm_mode_to_number(enum arm_mode mode)
184 switch (mode) {
185 case ARM_MODE_ANY:
186 /* map MODE_ANY to user mode */
187 case ARM_MODE_USR:
188 return 0;
189 case ARM_MODE_FIQ:
190 return 1;
191 case ARM_MODE_IRQ:
192 return 2;
193 case ARM_MODE_SVC:
194 return 3;
195 case ARM_MODE_ABT:
196 return 4;
197 case ARM_MODE_UND:
198 return 5;
199 case ARM_MODE_SYS:
200 return 6;
201 case ARM_MODE_MON:
202 return 7;
203 default:
204 LOG_ERROR("invalid mode value encountered %d", mode);
205 return -1;
209 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
210 enum arm_mode armv4_5_number_to_mode(int number)
212 switch (number) {
213 case 0:
214 return ARM_MODE_USR;
215 case 1:
216 return ARM_MODE_FIQ;
217 case 2:
218 return ARM_MODE_IRQ;
219 case 3:
220 return ARM_MODE_SVC;
221 case 4:
222 return ARM_MODE_ABT;
223 case 5:
224 return ARM_MODE_UND;
225 case 6:
226 return ARM_MODE_SYS;
227 case 7:
228 return ARM_MODE_MON;
229 default:
230 LOG_ERROR("mode index out of bounds %d", number);
231 return ARM_MODE_ANY;
235 static const char *arm_state_strings[] = {
236 "ARM", "Thumb", "Jazelle", "ThumbEE",
239 /* Templates for ARM core registers.
241 * NOTE: offsets in this table are coupled to the arm_mode_data
242 * table above, the armv4_5_core_reg_map array below, and also to
243 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
245 static const struct {
246 /* The name is used for e.g. the "regs" command. */
247 const char *name;
249 /* The {cookie, mode} tuple uniquely identifies one register.
250 * In a given mode, cookies 0..15 map to registers R0..R15,
251 * with R13..R15 usually called SP, LR, PC.
253 * MODE_ANY is used as *input* to the mapping, and indicates
254 * various special cases (sigh) and errors.
256 * Cookie 16 is (currently) confusing, since it indicates
257 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
258 * (Exception modes have both CPSR and SPSR registers ...)
260 unsigned cookie;
261 enum arm_mode mode;
262 } arm_core_regs[] = {
263 /* IMPORTANT: we guarantee that the first eight cached registers
264 * correspond to r0..r7, and the fifteenth to PC, so that callers
265 * don't need to map them.
267 { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
268 { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
269 { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
270 { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
271 { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
272 { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
273 { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
274 { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },
276 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
277 * them as MODE_ANY creates special cases. (ANY means
278 * "not mapped" elsewhere; here it's "everything but FIQ".)
280 { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
281 { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
282 { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
283 { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
284 { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },
286 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
287 { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
288 { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },
290 /* guaranteed to be at index 15 */
291 { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },
293 { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
294 { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
295 { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
296 { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
297 { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },
299 { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
300 { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },
302 { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
303 { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },
305 { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
306 { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },
308 { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
309 { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },
311 { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
312 { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },
314 { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
315 { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
316 { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
317 { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
318 { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
319 { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },
321 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
322 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
323 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
326 /* map core mode (USR, FIQ, ...) and register number to
327 * indices into the register cache
329 const int armv4_5_core_reg_map[8][17] = {
330 { /* USR */
331 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
333 { /* FIQ (8 shadows of USR, vs normal 3) */
334 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
336 { /* IRQ */
337 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
339 { /* SVC */
340 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
342 { /* ABT */
343 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
345 { /* UND */
346 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
348 { /* SYS (same registers as USR) */
349 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
351 { /* MON */
352 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
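/* Example: in IRQ mode (row 2), register number 14 maps to cache index 24,
 * which is the "lr_irq" entry of arm_core_regs[] above, and register 16
 * maps to index 33, ARMV4_5_SPSR_IRQ in the enum at the top of this file.
 */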
357 * Configures host-side ARM records to reflect the specified CPSR.
358 * Later, code can use arm_reg_current() to map register numbers
359 * according to how they are exposed by this mode.
361 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
363 enum arm_mode mode = cpsr & 0x1f;
364 int num;
366 /* NOTE: this may be called very early, before the register
367 * cache is set up. We can't defend against many errors, in
368 * particular against CPSRs that aren't valid *here* ...
370 if (arm->cpsr) {
371 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
372 arm->cpsr->valid = 1;
373 arm->cpsr->dirty = 0;
376 arm->core_mode = mode;
378 /* mode_to_number() warned; set up a somewhat-sane mapping */
379 num = arm_mode_to_number(mode);
380 if (num < 0) {
381 mode = ARM_MODE_USR;
382 num = 0;
385 arm->map = &armv4_5_core_reg_map[num][0];
386 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
387 ? NULL
388 : arm->core_cache->reg_list + arm->map[16];
390 /* Older ARMs won't have the J bit */
391 enum arm_state state;
393 if (cpsr & (1 << 5)) { /* T */
394 if (cpsr & (1 << 24)) { /* J */
395 LOG_WARNING("ThumbEE -- incomplete support");
396 state = ARM_STATE_THUMB_EE;
397 } else
398 state = ARM_STATE_THUMB;
399 } else {
400 if (cpsr & (1 << 24)) { /* J */
401 LOG_ERROR("Jazelle state handling is BROKEN!");
402 state = ARM_STATE_JAZELLE;
403 } else
404 state = ARM_STATE_ARM;
406 arm->core_state = state;
408 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
409 arm_mode_name(mode),
410 arm_state_strings[arm->core_state]);
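/* Illustrative call sequence (not shown in this file): a core driver that
 * has just read CPSR from a halted target calls arm_set_cpsr(arm, cpsr)
 * first, then accesses banked registers through arm_reg_current().
 */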
414 * Returns handle to the register currently mapped to a given number.
415 * Someone must have called arm_set_cpsr() before.
417 * \param arm This core's state and registers are used.
418 * \param regnum From 0..15 corresponding to R0..R14 and PC.
419 * Note that R0..R7 don't require mapping; you may access those
420 * as the first eight entries in the register cache. Likewise
421 * R15 (PC) doesn't need mapping; you may also access it directly.
422 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
423 * CPSR (arm->cpsr) is also not mapped.
425 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
427 struct reg *r;
429 if (regnum > 16)
430 return NULL;
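/* Guard for an underinitialised target: if gdb connects before
 * arm_set_cpsr() has populated arm->map, fall back to the flat cache
 * index instead of dereferencing a NULL map.
 */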
432 if (!arm->map) {
433 LOG_ERROR("Register map is not available yet, the target is not fully initialised");
434 r = arm->core_cache->reg_list + regnum;
435 } else
436 r = arm->core_cache->reg_list + arm->map[regnum];
438 /* e.g. invalid CPSR said "secure monitor" mode on a core
439 * that doesn't support it...
441 if (!r) {
442 LOG_ERROR("Invalid CPSR mode");
443 r = arm->core_cache->reg_list + regnum;
446 return r;
449 static const uint8_t arm_gdb_dummy_fp_value[12];
452 * Dummy FPA registers are required to support GDB on ARM.
453 * Register packets require eight obsolete FPA register values.
454 * Modern ARM cores use Vector Floating Point (VFP), if they
455 * have any floating point support. VFP is not FPA-compatible.
457 struct reg arm_gdb_dummy_fp_reg = {
458 .name = "GDB dummy FPA register",
459 .value = (uint8_t *) arm_gdb_dummy_fp_value,
460 .valid = 1,
461 .size = 96,
464 static const uint8_t arm_gdb_dummy_fps_value[4];
467 * Dummy FPA status registers are required to support GDB on ARM.
468 * Register packets require an obsolete FPA status register.
470 struct reg arm_gdb_dummy_fps_reg = {
471 .name = "GDB dummy FPA status register",
472 .value = (uint8_t *) arm_gdb_dummy_fps_value,
473 .valid = 1,
474 .size = 32,
477 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
479 static void arm_gdb_dummy_init(void)
481 register_init_dummy(&arm_gdb_dummy_fp_reg);
482 register_init_dummy(&arm_gdb_dummy_fps_reg);
485 static int armv4_5_get_core_reg(struct reg *reg)
487 int retval;
488 struct arm_reg *reg_arch_info = reg->arch_info;
489 struct target *target = reg_arch_info->target;
491 if (target->state != TARGET_HALTED) {
492 LOG_ERROR("Target not halted");
493 return ERROR_TARGET_NOT_HALTED;
496 retval = reg_arch_info->arm->read_core_reg(target, reg,
497 reg_arch_info->num, reg_arch_info->mode);
498 if (retval == ERROR_OK) {
499 reg->valid = 1;
500 reg->dirty = 0;
503 return retval;
506 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
508 struct arm_reg *reg_arch_info = reg->arch_info;
509 struct target *target = reg_arch_info->target;
510 struct arm *armv4_5_target = target_to_arm(target);
511 uint32_t value = buf_get_u32(buf, 0, 32);
513 if (target->state != TARGET_HALTED) {
514 LOG_ERROR("Target not halted");
515 return ERROR_TARGET_NOT_HALTED;
518 /* Except for CPSR, the "reg" command exposes a writeback model
519 * for the register cache.
521 if (reg == armv4_5_target->cpsr) {
522 arm_set_cpsr(armv4_5_target, value);
524 /* Older cores need help to be in ARM mode during halt
525 * mode debug, so we clear the J and T bits if we flush.
526 * For newer cores (v6/v7a/v7r) we don't need that, but
527 * it won't hurt since CPSR is always flushed anyway.
529 if (armv4_5_target->core_mode !=
530 (enum arm_mode)(value & 0x1f)) {
531 LOG_DEBUG("changing ARM core mode to '%s'",
532 arm_mode_name(value & 0x1f));
533 value &= ~((1 << 24) | (1 << 5));
534 armv4_5_target->write_core_reg(target, reg,
535 16, ARM_MODE_ANY, value);
537 } else {
538 buf_set_u32(reg->value, 0, 32, value);
539 reg->valid = 1;
541 reg->dirty = 1;
543 return ERROR_OK;
546 static const struct reg_arch_type arm_reg_type = {
547 .get = armv4_5_get_core_reg,
548 .set = armv4_5_set_core_reg,
551 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
553 int num_regs = ARRAY_SIZE(arm_core_regs);
554 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
555 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
556 struct arm_reg *reg_arch_info = calloc(num_regs, sizeof(struct arm_reg));
557 int i;
559 if (!cache || !reg_list || !reg_arch_info) {
560 free(cache);
561 free(reg_list);
562 free(reg_arch_info);
563 return NULL;
566 cache->name = "ARM registers";
567 cache->next = NULL;
568 cache->reg_list = reg_list;
569 cache->num_regs = 0;
571 for (i = 0; i < num_regs; i++) {
572 /* Skip registers this core doesn't expose */
573 if (arm_core_regs[i].mode == ARM_MODE_MON
574 && arm->core_type != ARM_MODE_MON)
575 continue;
577 /* REVISIT handle Cortex-M, which only shadows R13/SP */
579 reg_arch_info[i].num = arm_core_regs[i].cookie;
580 reg_arch_info[i].mode = arm_core_regs[i].mode;
581 reg_arch_info[i].target = target;
582 reg_arch_info[i].arm = arm;
584 reg_list[i].name = (char *) arm_core_regs[i].name;
585 reg_list[i].size = 32;
586 reg_list[i].value = &reg_arch_info[i].value;
587 reg_list[i].type = &arm_reg_type;
588 reg_list[i].arch_info = &reg_arch_info[i];
590 cache->num_regs++;
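/* Cache entries follow arm_core_regs[] order, so reg_list[15] is PC and
 * reg_list[ARMV4_5_CPSR] (index 31) is CPSR, matching the enum and the
 * armv4_5_core_reg_map[] table above.
 */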
593 arm->pc = reg_list + 15;
594 arm->cpsr = reg_list + ARMV4_5_CPSR;
595 arm->core_cache = cache;
596 return cache;
599 int arm_arch_state(struct target *target)
601 struct arm *arm = target_to_arm(target);
603 if (arm->common_magic != ARM_COMMON_MAGIC) {
604 LOG_ERROR("BUG: called for a non-ARM target");
605 return ERROR_FAIL;
608 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
609 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
610 arm_state_strings[arm->core_state],
611 debug_reason_name(target),
612 arm_mode_name(arm->core_mode),
613 buf_get_u32(arm->cpsr->value, 0, 32),
614 buf_get_u32(arm->pc->value, 0, 32),
615 arm->is_semihosting ? ", semihosting" : "");
617 return ERROR_OK;
620 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
621 (cache->reg_list[armv4_5_core_reg_map[mode][num]])
623 COMMAND_HANDLER(handle_armv4_5_reg_command)
625 struct target *target = get_current_target(CMD_CTX);
626 struct arm *arm = target_to_arm(target);
627 struct reg *regs;
629 if (!is_arm(arm)) {
630 command_print(CMD_CTX, "current target isn't an ARM");
631 return ERROR_FAIL;
634 if (target->state != TARGET_HALTED) {
635 command_print(CMD_CTX, "error: target must be halted for register accesses");
636 return ERROR_FAIL;
639 if (arm->core_type != ARM_MODE_ANY) {
640 command_print(CMD_CTX,
641 "Microcontroller Profile not supported - use standard reg cmd");
642 return ERROR_OK;
645 if (!is_arm_mode(arm->core_mode)) {
646 LOG_ERROR("not a valid arm core mode - communication failure?");
647 return ERROR_FAIL;
650 if (!arm->full_context) {
651 command_print(CMD_CTX, "error: target doesn't support %s",
652 CMD_NAME);
653 return ERROR_FAIL;
656 regs = arm->core_cache->reg_list;
658 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
659 const char *name;
660 char *sep = "\n";
661 char *shadow = "";
663 /* label this bank of registers (or shadows) */
664 switch (arm_mode_data[mode].psr) {
665 case ARM_MODE_SYS:
666 continue;
667 case ARM_MODE_USR:
668 name = "System and User";
669 sep = "";
670 break;
671 case ARM_MODE_MON:
672 if (arm->core_type != ARM_MODE_MON)
673 continue;
674 /* FALLTHROUGH */
675 default:
676 name = arm_mode_data[mode].name;
677 shadow = "shadow ";
678 break;
680 command_print(CMD_CTX, "%s%s mode %sregisters",
681 sep, name, shadow);
683 /* display N rows of up to 4 registers each */
684 for (unsigned i = 0; i < arm_mode_data[mode].n_indices; ) {
685 char output[80];
686 int output_len = 0;
688 for (unsigned j = 0; j < 4; j++, i++) {
689 uint32_t value;
690 struct reg *reg = regs;
692 if (i >= arm_mode_data[mode].n_indices)
693 break;
695 reg += arm_mode_data[mode].indices[i];
697 /* REVISIT be smarter about faults... */
698 if (!reg->valid)
699 arm->full_context(target);
701 value = buf_get_u32(reg->value, 0, 32);
702 output_len += snprintf(output + output_len,
703 sizeof(output) - output_len,
704 "%8s: %8.8" PRIx32 " ",
705 reg->name, value);
707 command_print(CMD_CTX, "%s", output);
711 return ERROR_OK;
714 COMMAND_HANDLER(handle_armv4_5_core_state_command)
716 struct target *target = get_current_target(CMD_CTX);
717 struct arm *arm = target_to_arm(target);
719 if (!is_arm(arm)) {
720 command_print(CMD_CTX, "current target isn't an ARM");
721 return ERROR_FAIL;
724 if (arm->core_type == ARM_MODE_THREAD) {
725 /* armv7m not supported */
726 command_print(CMD_CTX, "Unsupported Command");
727 return ERROR_OK;
730 if (CMD_ARGC > 0) {
731 if (strcmp(CMD_ARGV[0], "arm") == 0)
732 arm->core_state = ARM_STATE_ARM;
733 if (strcmp(CMD_ARGV[0], "thumb") == 0)
734 arm->core_state = ARM_STATE_THUMB;
737 command_print(CMD_CTX, "core state: %s", arm_state_strings[arm->core_state]);
739 return ERROR_OK;
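/* Illustrative usage: "arm disassemble 0x00008000 4 thumb" prints four
 * Thumb instructions starting at 0x8000; with no count given, a single
 * instruction at the given address is disassembled.
 */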
742 COMMAND_HANDLER(handle_arm_disassemble_command)
744 int retval = ERROR_OK;
745 struct target *target = get_current_target(CMD_CTX);
747 if (target == NULL) {
748 LOG_ERROR("No target selected");
749 return ERROR_FAIL;
752 struct arm *arm = target_to_arm(target);
753 uint32_t address;
754 int count = 1;
755 int thumb = 0;
757 if (!is_arm(arm)) {
758 command_print(CMD_CTX, "current target isn't an ARM");
759 return ERROR_FAIL;
762 if (arm->core_type == ARM_MODE_THREAD) {
763 /* armv7m is always thumb mode */
764 thumb = 1;
767 switch (CMD_ARGC) {
768 case 3:
769 if (strcmp(CMD_ARGV[2], "thumb") != 0)
770 goto usage;
771 thumb = 1;
772 /* FALL THROUGH */
773 case 2:
774 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
775 /* FALL THROUGH */
776 case 1:
777 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
778 if (address & 0x01) {
779 if (!thumb) {
780 command_print(CMD_CTX, "Disassemble as Thumb");
781 thumb = 1;
783 address &= ~1;
785 break;
786 default:
787 usage:
788 count = 0;
789 retval = ERROR_COMMAND_SYNTAX_ERROR;
792 while (count-- > 0) {
793 struct arm_instruction cur_instruction;
795 if (thumb) {
796 /* Always use Thumb2 disassembly for best handling
797 * of 32-bit BL/BLX, and to work with newer cores
798 * (some ARMv6, all ARMv7) that use Thumb2.
800 retval = thumb2_opcode(target, address,
801 &cur_instruction);
802 if (retval != ERROR_OK)
803 break;
804 } else {
805 uint32_t opcode;
807 retval = target_read_u32(target, address, &opcode);
808 if (retval != ERROR_OK)
809 break;
810 retval = arm_evaluate_opcode(opcode, address,
811 &cur_instruction);
812 if (retval != ERROR_OK)
813 break;
815 command_print(CMD_CTX, "%s", cur_instruction.text);
816 address += cur_instruction.instruction_size;
819 return retval;
822 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
824 struct command_context *context;
825 struct target *target;
826 struct arm *arm;
827 int retval;
829 context = current_command_context(interp);
830 assert(context != NULL);
832 target = get_current_target(context);
833 if (target == NULL) {
834 LOG_ERROR("%s: no current target", __func__);
835 return JIM_ERR;
837 if (!target_was_examined(target)) {
838 LOG_ERROR("%s: not yet examined", target_name(target));
839 return JIM_ERR;
841 arm = target_to_arm(target);
842 if (!is_arm(arm)) {
843 LOG_ERROR("%s: not an ARM", target_name(target));
844 return JIM_ERR;
847 if ((argc < 6) || (argc > 7)) {
848 /* FIXME use the command name to verify # params... */
849 LOG_ERROR("%s: wrong number of arguments", __func__);
850 return JIM_ERR;
853 int cpnum;
854 uint32_t op1;
855 uint32_t op2;
856 uint32_t CRn;
857 uint32_t CRm;
858 uint32_t value;
859 long l;
861 /* NOTE: parameter sequence matches ARM instruction set usage:
862 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
863 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
864 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
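/* Illustrative Tcl usage (assuming a CP15-equipped core): reading the
 * Main ID register, MRC p15, 0, <Rd>, c0, c0, 0, becomes
 *   arm mrc 15 0 0 0 0
 * and a write appends the value to store as the final argument.
 */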
866 retval = Jim_GetLong(interp, argv[1], &l);
867 if (retval != JIM_OK)
868 return retval;
869 if (l & ~0xf) {
870 LOG_ERROR("%s: %s %d out of range", __func__,
871 "coprocessor", (int) l);
872 return JIM_ERR;
874 cpnum = l;
876 retval = Jim_GetLong(interp, argv[2], &l);
877 if (retval != JIM_OK)
878 return retval;
879 if (l & ~0x7) {
880 LOG_ERROR("%s: %s %d out of range", __func__,
881 "op1", (int) l);
882 return JIM_ERR;
884 op1 = l;
886 retval = Jim_GetLong(interp, argv[3], &l);
887 if (retval != JIM_OK)
888 return retval;
889 if (l & ~0xf) {
890 LOG_ERROR("%s: %s %d out of range", __func__,
891 "CRn", (int) l);
892 return JIM_ERR;
894 CRn = l;
896 retval = Jim_GetLong(interp, argv[4], &l);
897 if (retval != JIM_OK)
898 return retval;
899 if (l & ~0xf) {
900 LOG_ERROR("%s: %s %d out of range", __func__,
901 "CRm", (int) l);
902 return JIM_ERR;
904 CRm = l;
906 retval = Jim_GetLong(interp, argv[5], &l);
907 if (retval != JIM_OK)
908 return retval;
909 if (l & ~0x7) {
910 LOG_ERROR("%s: %s %d out of range", __func__,
911 "op2", (int) l);
912 return JIM_ERR;
914 op2 = l;
916 value = 0;
918 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
919 * that could easily be a typo! Check both...
921 * FIXME change the call syntax here ... simplest to just pass
922 * the MRC() or MCR() instruction to be executed. That will also
923 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
924 * if that's ever needed.
926 if (argc == 7) {
927 retval = Jim_GetLong(interp, argv[6], &l);
928 if (retval != JIM_OK)
929 return retval;
930 value = l;
932 /* NOTE: parameters reordered! */
933 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
934 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
935 if (retval != ERROR_OK)
936 return JIM_ERR;
937 } else {
938 /* NOTE: parameters reordered! */
939 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
940 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
941 if (retval != ERROR_OK)
942 return JIM_ERR;
944 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
947 return JIM_OK;
950 COMMAND_HANDLER(handle_arm_semihosting_command)
952 struct target *target = get_current_target(CMD_CTX);
954 if (target == NULL) {
955 LOG_ERROR("No target selected");
956 return ERROR_FAIL;
959 struct arm *arm = target_to_arm(target);
961 if (!is_arm(arm)) {
962 command_print(CMD_CTX, "current target isn't an ARM");
963 return ERROR_FAIL;
966 if (!arm->setup_semihosting) {
967 command_print(CMD_CTX, "semihosting not supported for current target");
968 return ERROR_FAIL;
971 if (CMD_ARGC > 0) {
972 int semihosting;
974 COMMAND_PARSE_ENABLE(CMD_ARGV[0], semihosting);
976 if (!target_was_examined(target)) {
977 LOG_ERROR("Target not examined yet");
978 return ERROR_FAIL;
981 if (arm->setup_semihosting(target, semihosting) != ERROR_OK) {
982 LOG_ERROR("Failed to Configure semihosting");
983 return ERROR_FAIL;
986 /* FIXME never let that "catch" be dropped! */
987 arm->is_semihosting = semihosting;
990 command_print(CMD_CTX, "semihosting is %s",
991 arm->is_semihosting
992 ? "enabled" : "disabled");
994 return ERROR_OK;
997 static const struct command_registration arm_exec_command_handlers[] = {
999 .name = "reg",
1000 .handler = handle_armv4_5_reg_command,
1001 .mode = COMMAND_EXEC,
1002 .help = "display ARM core registers",
1003 .usage = "",
1006 .name = "core_state",
1007 .handler = handle_armv4_5_core_state_command,
1008 .mode = COMMAND_EXEC,
1009 .usage = "['arm'|'thumb']",
1010 .help = "display/change ARM core state",
1013 .name = "disassemble",
1014 .handler = handle_arm_disassemble_command,
1015 .mode = COMMAND_EXEC,
1016 .usage = "address [count ['thumb']]",
1017 .help = "disassemble instructions ",
1020 .name = "mcr",
1021 .mode = COMMAND_EXEC,
1022 .jim_handler = &jim_mcrmrc,
1023 .help = "write coprocessor register",
1024 .usage = "cpnum op1 CRn CRm op2 value",
1027 .name = "mrc",
1028 .jim_handler = &jim_mcrmrc,
1029 .help = "read coprocessor register",
1030 .usage = "cpnum op1 CRn CRm op2",
1033 "semihosting",
1034 .handler = handle_arm_semihosting_command,
1035 .mode = COMMAND_EXEC,
1036 .usage = "['enable'|'disable']",
1037 .help = "activate support for semihosting operations",
1040 COMMAND_REGISTRATION_DONE
1042 const struct command_registration arm_command_handlers[] = {
1044 .name = "arm",
1045 .mode = COMMAND_ANY,
1046 .help = "ARM command group",
1047 .usage = "",
1048 .chain = arm_exec_command_handlers,
1050 COMMAND_REGISTRATION_DONE
1053 int arm_get_gdb_reg_list(struct target *target,
1054 struct reg **reg_list[], int *reg_list_size)
1056 struct arm *arm = target_to_arm(target);
1057 int i;
1059 if (!is_arm_mode(arm->core_mode)) {
1060 LOG_ERROR("not a valid arm core mode - communication failure?");
1061 return ERROR_FAIL;
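/* GDB's legacy ARM register layout: r0..r15 (entries 0..15), eight dummy
 * 96-bit FPA registers (16..23), the dummy FPA status register (24), and
 * CPSR (25); 26 entries in total.
 */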
1064 *reg_list_size = 26;
1065 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
1067 for (i = 0; i < 16; i++)
1068 (*reg_list)[i] = arm_reg_current(arm, i);
1070 for (i = 16; i < 24; i++)
1071 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
1073 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
1074 (*reg_list)[25] = arm->cpsr;
1076 return ERROR_OK;
1079 /* wait for execution to complete and check exit point */
1080 static int armv4_5_run_algorithm_completion(struct target *target,
1081 uint32_t exit_point,
1082 int timeout_ms,
1083 void *arch_info)
1085 int retval;
1086 struct arm *arm = target_to_arm(target);
1088 retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
1089 if (retval != ERROR_OK)
1090 return retval;
1091 if (target->state != TARGET_HALTED) {
1092 retval = target_halt(target);
1093 if (retval != ERROR_OK)
1094 return retval;
1095 retval = target_wait_state(target, TARGET_HALTED, 500);
1096 if (retval != ERROR_OK)
1097 return retval;
1098 return ERROR_TARGET_TIMEOUT;
1101 /* fast exit: ARMv5+ code can use BKPT */
1102 if (exit_point && buf_get_u32(arm->pc->value, 0, 32) != exit_point) {
1103 LOG_WARNING(
1104 "target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1105 buf_get_u32(arm->pc->value, 0, 32));
1106 return ERROR_TARGET_TIMEOUT;
1109 return ERROR_OK;
1112 int armv4_5_run_algorithm_inner(struct target *target,
1113 int num_mem_params, struct mem_param *mem_params,
1114 int num_reg_params, struct reg_param *reg_params,
1115 uint32_t entry_point, uint32_t exit_point,
1116 int timeout_ms, void *arch_info,
1117 int (*run_it)(struct target *target, uint32_t exit_point,
1118 int timeout_ms, void *arch_info))
1120 struct arm *arm = target_to_arm(target);
1121 struct arm_algorithm *arm_algorithm_info = arch_info;
1122 enum arm_state core_state = arm->core_state;
1123 uint32_t context[17];
1124 uint32_t cpsr;
1125 int exit_breakpoint_size = 0;
1126 int i;
1127 int retval = ERROR_OK;
1129 LOG_DEBUG("Running algorithm");
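/* Flow: validate target state, save r0..r15 and CPSR/SPSR for the
 * requested mode, load mem/reg parameters, optionally plant an exit
 * breakpoint, resume at entry_point, wait via run_it(), read back
 * parameters, then restore the saved context.
 */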
1131 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC) {
1132 LOG_ERROR("current target isn't an ARMV4/5 target");
1133 return ERROR_TARGET_INVALID;
1136 if (target->state != TARGET_HALTED) {
1137 LOG_WARNING("target not halted");
1138 return ERROR_TARGET_NOT_HALTED;
1141 if (!is_arm_mode(arm->core_mode)) {
1142 LOG_ERROR("not a valid arm core mode - communication failure?");
1143 return ERROR_FAIL;
1146 /* armv5 and later can terminate with BKPT instruction; less overhead */
1147 if (!exit_point && arm->is_armv4) {
1148 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1149 return ERROR_FAIL;
1152 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1153 * they'll be restored later.
1155 for (i = 0; i <= 16; i++) {
1156 struct reg *r;
1158 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1159 arm_algorithm_info->core_mode, i);
1160 if (!r->valid)
1161 arm->read_core_reg(target, r, i,
1162 arm_algorithm_info->core_mode);
1163 context[i] = buf_get_u32(r->value, 0, 32);
1165 cpsr = buf_get_u32(arm->cpsr->value, 0, 32);
1167 for (i = 0; i < num_mem_params; i++) {
1168 retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size,
1169 mem_params[i].value);
1170 if (retval != ERROR_OK)
1171 return retval;
1174 for (i = 0; i < num_reg_params; i++) {
1175 struct reg *reg = register_get_by_name(arm->core_cache, reg_params[i].reg_name, 0);
1176 if (!reg) {
1177 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1178 return ERROR_COMMAND_SYNTAX_ERROR;
1181 if (reg->size != reg_params[i].size) {
1182 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size",
1183 reg_params[i].reg_name);
1184 return ERROR_COMMAND_SYNTAX_ERROR;
1187 retval = armv4_5_set_core_reg(reg, reg_params[i].value);
1188 if (retval != ERROR_OK)
1189 return retval;
1192 arm->core_state = arm_algorithm_info->core_state;
1193 if (arm->core_state == ARM_STATE_ARM)
1194 exit_breakpoint_size = 4;
1195 else if (arm->core_state == ARM_STATE_THUMB)
1196 exit_breakpoint_size = 2;
1197 else {
1198 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1199 return ERROR_COMMAND_SYNTAX_ERROR;
1202 if (arm_algorithm_info->core_mode != ARM_MODE_ANY) {
1203 LOG_DEBUG("setting core_mode: 0x%2.2x",
1204 arm_algorithm_info->core_mode);
1205 buf_set_u32(arm->cpsr->value, 0, 5,
1206 arm_algorithm_info->core_mode);
1207 arm->cpsr->dirty = 1;
1208 arm->cpsr->valid = 1;
1211 /* terminate using a hardware or (ARMv5+) software breakpoint */
1212 if (exit_point) {
1213 retval = breakpoint_add(target, exit_point,
1214 exit_breakpoint_size, BKPT_HARD);
1215 if (retval != ERROR_OK) {
1216 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1217 return ERROR_TARGET_FAILURE;
1221 retval = target_resume(target, 0, entry_point, 1, 1);
1222 if (retval != ERROR_OK)
1223 return retval;
1224 retval = run_it(target, exit_point, timeout_ms, arch_info);
1226 if (exit_point)
1227 breakpoint_remove(target, exit_point);
1229 if (retval != ERROR_OK)
1230 return retval;
1232 for (i = 0; i < num_mem_params; i++) {
1233 if (mem_params[i].direction != PARAM_OUT) {
1234 int retvaltemp = target_read_buffer(target, mem_params[i].address,
1235 mem_params[i].size,
1236 mem_params[i].value);
1237 if (retvaltemp != ERROR_OK)
1238 retval = retvaltemp;
1242 for (i = 0; i < num_reg_params; i++) {
1243 if (reg_params[i].direction != PARAM_OUT) {
1245 struct reg *reg = register_get_by_name(arm->core_cache,
1246 reg_params[i].reg_name,
1248 if (!reg) {
1249 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1250 retval = ERROR_COMMAND_SYNTAX_ERROR;
1251 continue;
1254 if (reg->size != reg_params[i].size) {
1255 LOG_ERROR(
1256 "BUG: register '%s' size doesn't match reg_params[i].size",
1257 reg_params[i].reg_name);
1258 retval = ERROR_COMMAND_SYNTAX_ERROR;
1259 continue;
1262 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1266 /* restore everything we saved before (17 or 18 registers) */
1267 for (i = 0; i <= 16; i++) {
1268 uint32_t regvalue;
1269 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(arm->core_cache,
1270 arm_algorithm_info->core_mode, i).value, 0, 32);
1271 if (regvalue != context[i]) {
1272 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "",
1273 ARMV4_5_CORE_REG_MODE(arm->core_cache,
1274 arm_algorithm_info->core_mode, i).name, context[i]);
1275 buf_set_u32(ARMV4_5_CORE_REG_MODE(arm->core_cache,
1276 arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1277 ARMV4_5_CORE_REG_MODE(arm->core_cache, arm_algorithm_info->core_mode,
1278 i).valid = 1;
1279 ARMV4_5_CORE_REG_MODE(arm->core_cache, arm_algorithm_info->core_mode,
1280 i).dirty = 1;
1284 arm_set_cpsr(arm, cpsr);
1285 arm->cpsr->dirty = 1;
1287 arm->core_state = core_state;
1289 return retval;
1292 int armv4_5_run_algorithm(struct target *target,
1293 int num_mem_params,
1294 struct mem_param *mem_params,
1295 int num_reg_params,
1296 struct reg_param *reg_params,
1297 uint32_t entry_point,
1298 uint32_t exit_point,
1299 int timeout_ms,
1300 void *arch_info)
1302 return armv4_5_run_algorithm_inner(target,
1303 num_mem_params,
1304 mem_params,
1305 num_reg_params,
1306 reg_params,
1307 entry_point,
1308 exit_point,
1309 timeout_ms,
1310 arch_info,
1311 armv4_5_run_algorithm_completion);
1315 * Runs ARM code in the target to calculate a CRC32 checksum.
1318 int arm_checksum_memory(struct target *target,
1319 uint32_t address, uint32_t count, uint32_t *checksum)
1321 struct working_area *crc_algorithm;
1322 struct arm_algorithm arm_algo;
1323 struct arm *arm = target_to_arm(target);
1324 struct reg_param reg_params[2];
1325 int retval;
1326 uint32_t i;
1327 uint32_t exit_var = 0;
1329 /* see contrib/loaders/checksum/armv4_5_crc.s for src */
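/* The loader computes a plain (non-reflected) CRC-32 over 'count' bytes
 * at 'address': polynomial 0x04C11DB7, initial value 0xffffffff, no final
 * XOR; the result is returned in r0.
 */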
1331 static const uint32_t arm_crc_code[] = {
1332 0xE1A02000, /* mov r2, r0 */
1333 0xE3E00000, /* mov r0, #0xffffffff */
1334 0xE1A03001, /* mov r3, r1 */
1335 0xE3A04000, /* mov r4, #0 */
1336 0xEA00000B, /* b ncomp */
1337 /* nbyte: */
1338 0xE7D21004, /* ldrb r1, [r2, r4] */
1339 0xE59F7030, /* ldr r7, CRC32XOR */
1340 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1341 0xE3A05000, /* mov r5, #0 */
1342 /* loop: */
1343 0xE3500000, /* cmp r0, #0 */
1344 0xE1A06080, /* mov r6, r0, asl #1 */
1345 0xE2855001, /* add r5, r5, #1 */
1346 0xE1A00006, /* mov r0, r6 */
1347 0xB0260007, /* eorlt r0, r6, r7 */
1348 0xE3550008, /* cmp r5, #8 */
1349 0x1AFFFFF8, /* bne loop */
1350 0xE2844001, /* add r4, r4, #1 */
1351 /* ncomp: */
1352 0xE1540003, /* cmp r4, r3 */
1353 0x1AFFFFF1, /* bne nbyte */
1354 /* end: */
1355 0xe1200070, /* bkpt #0 */
1356 /* CRC32XOR: */
1357 0x04C11DB7 /* .word 0x04C11DB7 */
1360 retval = target_alloc_working_area(target,
1361 sizeof(arm_crc_code), &crc_algorithm);
1362 if (retval != ERROR_OK)
1363 return retval;
1365 /* convert code into a buffer in target endianness */
1366 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1367 retval = target_write_u32(target,
1368 crc_algorithm->address + i * sizeof(uint32_t),
1369 arm_crc_code[i]);
1370 if (retval != ERROR_OK)
1371 return retval;
1374 arm_algo.common_magic = ARM_COMMON_MAGIC;
1375 arm_algo.core_mode = ARM_MODE_SVC;
1376 arm_algo.core_state = ARM_STATE_ARM;
1378 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1379 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1381 buf_set_u32(reg_params[0].value, 0, 32, address);
1382 buf_set_u32(reg_params[1].value, 0, 32, count);
1384 /* 20 second timeout/megabyte */
1385 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1387 /* armv4 must exit using a hardware breakpoint */
1388 if (arm->is_armv4)
1389 exit_var = crc_algorithm->address + sizeof(arm_crc_code) - 8;
1391 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1392 crc_algorithm->address,
1393 exit_var,
1394 timeout, &arm_algo);
1395 if (retval != ERROR_OK) {
1396 LOG_ERROR("error executing ARM crc algorithm");
1397 destroy_reg_param(&reg_params[0]);
1398 destroy_reg_param(&reg_params[1]);
1399 target_free_working_area(target, crc_algorithm);
1400 return retval;
1403 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1405 destroy_reg_param(&reg_params[0]);
1406 destroy_reg_param(&reg_params[1]);
1408 target_free_working_area(target, crc_algorithm);
1410 return ERROR_OK;
1414 * Runs ARM code in the target to check whether a memory block holds
1415 * all ones. NOR flash which has been erased, and thus may be written,
1416 * holds all ones.
1419 int arm_blank_check_memory(struct target *target,
1420 uint32_t address, uint32_t count, uint32_t *blank)
1422 struct working_area *check_algorithm;
1423 struct reg_param reg_params[3];
1424 struct arm_algorithm arm_algo;
1425 struct arm *arm = target_to_arm(target);
1426 int retval;
1427 uint32_t i;
1428 uint32_t exit_var = 0;
1430 /* see contrib/loaders/erase_check/armv4_5_erase_check.s for src */
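/* The loader ANDs every byte of the block into r2, which starts at 0xff;
 * *blank therefore reads back as 0xff only if the whole block is erased
 * (all ones).
 */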
1432 static const uint32_t check_code[] = {
1433 /* loop: */
1434 0xe4d03001, /* ldrb r3, [r0], #1 */
1435 0xe0022003, /* and r2, r2, r3 */
1436 0xe2511001, /* subs r1, r1, #1 */
1437 0x1afffffb, /* bne loop */
1438 /* end: */
1439 0xe1200070, /* bkpt #0 */
1442 /* make sure we have a working area */
1443 retval = target_alloc_working_area(target,
1444 sizeof(check_code), &check_algorithm);
1445 if (retval != ERROR_OK)
1446 return retval;
1448 /* convert code into a buffer in target endianness */
1449 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1450 retval = target_write_u32(target,
1451 check_algorithm->address
1452 + i * sizeof(uint32_t),
1453 check_code[i]);
1454 if (retval != ERROR_OK)
1455 return retval;
1458 arm_algo.common_magic = ARM_COMMON_MAGIC;
1459 arm_algo.core_mode = ARM_MODE_SVC;
1460 arm_algo.core_state = ARM_STATE_ARM;
1462 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1463 buf_set_u32(reg_params[0].value, 0, 32, address);
1465 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1466 buf_set_u32(reg_params[1].value, 0, 32, count);
1468 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1469 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1471 /* armv4 must exit using a hardware breakpoint */
1472 if (arm->is_armv4)
1473 exit_var = check_algorithm->address + sizeof(check_code) - 4;
1475 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1476 check_algorithm->address,
1477 exit_var,
1478 10000, &arm_algo);
1479 if (retval != ERROR_OK) {
1480 destroy_reg_param(&reg_params[0]);
1481 destroy_reg_param(&reg_params[1]);
1482 destroy_reg_param(&reg_params[2]);
1483 target_free_working_area(target, check_algorithm);
1484 return retval;
1487 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1489 destroy_reg_param(&reg_params[0]);
1490 destroy_reg_param(&reg_params[1]);
1491 destroy_reg_param(&reg_params[2]);
1493 target_free_working_area(target, check_algorithm);
1495 return ERROR_OK;
1498 static int arm_full_context(struct target *target)
1500 struct arm *arm = target_to_arm(target);
1501 unsigned num_regs = arm->core_cache->num_regs;
1502 struct reg *reg = arm->core_cache->reg_list;
1503 int retval = ERROR_OK;
1505 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1506 if (reg->valid)
1507 continue;
1508 retval = armv4_5_get_core_reg(reg);
1510 return retval;
1513 static int arm_default_mrc(struct target *target, int cpnum,
1514 uint32_t op1, uint32_t op2,
1515 uint32_t CRn, uint32_t CRm,
1516 uint32_t *value)
1518 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1519 return ERROR_FAIL;
1522 static int arm_default_mcr(struct target *target, int cpnum,
1523 uint32_t op1, uint32_t op2,
1524 uint32_t CRn, uint32_t CRm,
1525 uint32_t value)
1527 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1528 return ERROR_FAIL;
1531 int arm_init_arch_info(struct target *target, struct arm *arm)
1533 target->arch_info = arm;
1534 arm->target = target;
1536 arm->common_magic = ARM_COMMON_MAGIC;
1538 /* core_type may be overridden by subtype logic */
1539 if (arm->core_type != ARM_MODE_THREAD) {
1540 arm->core_type = ARM_MODE_ANY;
1541 arm_set_cpsr(arm, ARM_MODE_USR);
1544 /* default full_context() has no core-specific optimizations */
1545 if (!arm->full_context && arm->read_core_reg)
1546 arm->full_context = arm_full_context;
1548 if (!arm->mrc)
1549 arm->mrc = arm_default_mrc;
1550 if (!arm->mcr)
1551 arm->mcr = arm_default_mcr;
1553 return ERROR_OK;
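/* Note: core-specific drivers typically call arm_init_arch_info() from
 * their own init paths before building the register cache with
 * arm_build_reg_cache(); it wires target->arch_info and installs the
 * default mrc/mcr/full_context handlers.
 */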