TCL/SPEAr: move DDR activation in common code
[openocd/dsp568013.git] / src / target / armv4_5.c
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath
 *   Dominic.Rath@gmx.de
 *
 *   Copyright (C) 2008 by Spencer Oliver
 *   spen@spen-soft.co.uk
 *
 *   Copyright (C) 2008 by Oyvind Harboe
 *   oyvind.harboe@zylin.com
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "armv4_5.h"
#include "arm_jtag.h"
#include "breakpoints.h"
#include "arm_disassembler.h"
#include <helper/binarybuffer.h>
#include "algorithm.h"
#include "register.h"

/* offsets into armv4_5 core register cache */
enum {
//	ARMV4_5_CPSR = 31,
	ARMV4_5_SPSR_FIQ = 32,
	ARMV4_5_SPSR_IRQ = 33,
	ARMV4_5_SPSR_SVC = 34,
	ARMV4_5_SPSR_ABT = 35,
	ARMV4_5_SPSR_UND = 36,
	ARM_SPSR_MON = 39,
};
static const uint8_t arm_usr_indices[17] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
};

static const uint8_t arm_fiq_indices[8] = {
	16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
};

static const uint8_t arm_irq_indices[3] = {
	23, 24, ARMV4_5_SPSR_IRQ,
};

static const uint8_t arm_svc_indices[3] = {
	25, 26, ARMV4_5_SPSR_SVC,
};

static const uint8_t arm_abt_indices[3] = {
	27, 28, ARMV4_5_SPSR_ABT,
};

static const uint8_t arm_und_indices[3] = {
	29, 30, ARMV4_5_SPSR_UND,
};

static const uint8_t arm_mon_indices[3] = {
	37, 38, ARM_SPSR_MON,
};
static const struct {
	const char *name;
	unsigned short psr;
	/* For user and system modes, these list indices for all registers.
	 * Otherwise they're just indices for the shadow registers and SPSR.
	 */
	unsigned short n_indices;
	const uint8_t *indices;
} arm_mode_data[] = {
	/* Seven modes are standard from ARM7 on. "System" and "User" share
	 * the same registers; other modes shadow from 3 to 8 registers.
	 */
	{
		.name = "User",
		.psr = ARM_MODE_USR,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	{
		.name = "FIQ",
		.psr = ARM_MODE_FIQ,
		.n_indices = ARRAY_SIZE(arm_fiq_indices),
		.indices = arm_fiq_indices,
	},
	{
		.name = "Supervisor",
		.psr = ARM_MODE_SVC,
		.n_indices = ARRAY_SIZE(arm_svc_indices),
		.indices = arm_svc_indices,
	},
	{
		.name = "Abort",
		.psr = ARM_MODE_ABT,
		.n_indices = ARRAY_SIZE(arm_abt_indices),
		.indices = arm_abt_indices,
	},
	{
		.name = "IRQ",
		.psr = ARM_MODE_IRQ,
		.n_indices = ARRAY_SIZE(arm_irq_indices),
		.indices = arm_irq_indices,
	},
	{
		.name = "Undefined instruction",
		.psr = ARM_MODE_UND,
		.n_indices = ARRAY_SIZE(arm_und_indices),
		.indices = arm_und_indices,
	},
	{
		.name = "System",
		.psr = ARM_MODE_SYS,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	/* TrustZone "Security Extensions" add a secure monitor mode.
	 * This is distinct from a "debug monitor" which can support
	 * non-halting debug, in conjunction with some debuggers.
	 */
	{
		.name = "Secure Monitor",
		.psr = ARM_MODE_MON,
		.n_indices = ARRAY_SIZE(arm_mon_indices),
		.indices = arm_mon_indices,
	},
};
/** Map PSR mode bits to the name of an ARM processor operating mode. */
const char *arm_mode_name(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return arm_mode_data[i].name;
	}
	LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
	return "UNRECOGNIZED";
}

/** Return true iff the parameter denotes a valid ARM processor mode. */
bool is_arm_mode(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return true;
	}
	return false;
}
/** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
int arm_mode_to_number(enum arm_mode mode)
{
	switch (mode) {
	case ARM_MODE_ANY:
		/* map MODE_ANY to user mode */
	case ARM_MODE_USR:
		return 0;
	case ARM_MODE_FIQ:
		return 1;
	case ARM_MODE_IRQ:
		return 2;
	case ARM_MODE_SVC:
		return 3;
	case ARM_MODE_ABT:
		return 4;
	case ARM_MODE_UND:
		return 5;
	case ARM_MODE_SYS:
		return 6;
	case ARM_MODE_MON:
		return 7;
	default:
		LOG_ERROR("invalid mode value encountered %d", mode);
		return -1;
	}
}

/** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
enum arm_mode armv4_5_number_to_mode(int number)
{
	switch (number) {
	case 0:
		return ARM_MODE_USR;
	case 1:
		return ARM_MODE_FIQ;
	case 2:
		return ARM_MODE_IRQ;
	case 3:
		return ARM_MODE_SVC;
	case 4:
		return ARM_MODE_ABT;
	case 5:
		return ARM_MODE_UND;
	case 6:
		return ARM_MODE_SYS;
	case 7:
		return ARM_MODE_MON;
	default:
		LOG_ERROR("mode index out of bounds %d", number);
		return ARM_MODE_ANY;
	}
}
static const char *arm_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};

/* Templates for ARM core registers.
 *
 * NOTE: offsets in this table are coupled to the arm_mode_data
 * table above, the armv4_5_core_reg_map array below, and also to
 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
 */
static const struct {
	/* The name is used for e.g. the "regs" command. */
	const char *name;

	/* The {cookie, mode} tuple uniquely identifies one register.
	 * In a given mode, cookies 0..15 map to registers R0..R15,
	 * with R13..R15 usually called SP, LR, PC.
	 *
	 * MODE_ANY is used as *input* to the mapping, and indicates
	 * various special cases (sigh) and errors.
	 *
	 * Cookie 16 is (currently) confusing, since it indicates
	 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
	 * (Exception modes have both CPSR and SPSR registers ...)
	 */
	unsigned cookie;
	enum arm_mode mode;
} arm_core_regs[] = {
	/* IMPORTANT: we guarantee that the first eight cached registers
	 * correspond to r0..r7, and the fifteenth to PC, so that callers
	 * don't need to map them.
	 */
	{ .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
	{ .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
	{ .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
	{ .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
	{ .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
	{ .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
	{ .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
	{ .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },

	/* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
	 * them as MODE_ANY creates special cases.  (ANY means
	 * "not mapped" elsewhere; here it's "everything but FIQ".)
	 */
	{ .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
	{ .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
	{ .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
	{ .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
	{ .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },

	/* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
	{ .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
	{ .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },

	/* guaranteed to be at index 15 */
	{ .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },

	{ .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
	{ .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
	{ .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
	{ .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
	{ .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
	{ .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
	{ .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },

	{ .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
	{ .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },

	{ .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
	{ .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },

	{ .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
	{ .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },

	{ .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
	{ .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
	{ .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
	{ .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
	{ .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
	{ .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },

	{ .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
	{ .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
	{ .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
};
/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};
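/* Illustrative lookup (editor's note, not part of the original sources):
 * in Supervisor mode, r13/SP goes through armv4_5_core_reg_map[3][13],
 * which yields cache index 25 -- the "sp_svc" entry of arm_core_regs --
 * while r0..r7 and the PC occupy the same slots in every mode.
 */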
/**
 * Configures host-side ARM records to reflect the specified CPSR.
 * Later, code can use arm_reg_current() to map register numbers
 * according to how they are exposed by this mode.
 */
void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
{
	enum arm_mode mode = cpsr & 0x1f;
	int num;

	/* NOTE: this may be called very early, before the register
	 * cache is set up.  We can't defend against many errors, in
	 * particular against CPSRs that aren't valid *here* ...
	 */
	if (arm->cpsr) {
		buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
		arm->cpsr->valid = 1;
		arm->cpsr->dirty = 0;
	}

	arm->core_mode = mode;

	/* mode_to_number() warned; set up a somewhat-sane mapping */
	num = arm_mode_to_number(mode);
	if (num < 0) {
		mode = ARM_MODE_USR;
		num = 0;
	}

	arm->map = &armv4_5_core_reg_map[num][0];
	arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
			? NULL
			: arm->core_cache->reg_list + arm->map[16];

	/* Older ARMs won't have the J bit */
	enum arm_state state;

	if (cpsr & (1 << 5)) {	/* T */
		if (cpsr & (1 << 24)) {	/* J */
			LOG_WARNING("ThumbEE -- incomplete support");
			state = ARM_STATE_THUMB_EE;
		} else
			state = ARM_STATE_THUMB;
	} else {
		if (cpsr & (1 << 24)) {	/* J */
			LOG_ERROR("Jazelle state handling is BROKEN!");
			state = ARM_STATE_JAZELLE;
		} else
			state = ARM_STATE_ARM;
	}
	arm->core_state = state;

	LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
			arm_mode_name(mode),
			arm_state_strings[arm->core_state]);
}
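/* Worked example (illustrative values): a CPSR of 0x000000d3 has mode
 * bits 0x13 (Supervisor) with the T and J bits clear, so arm_set_cpsr()
 * selects the SVC row of armv4_5_core_reg_map and ARM_STATE_ARM.
 */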
/**
 * Returns handle to the register currently mapped to a given number.
 * Someone must have called arm_set_cpsr() before.
 *
 * \param arm This core's state and registers are used.
 * \param regnum From 0..15 corresponding to R0..R14 and PC.
 *	Note that R0..R7 don't require mapping; you may access those
 *	as the first eight entries in the register cache.  Likewise
 *	R15 (PC) doesn't need mapping; you may also access it directly.
 *	However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
 *	CPSR (arm->cpsr) is also not mapped.
 */
struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	if (regnum > 16)
		return NULL;

	r = arm->core_cache->reg_list + arm->map[regnum];

	/* e.g. invalid CPSR said "secure monitor" mode on a core
	 * that doesn't support it...
	 */
	if (!r) {
		LOG_ERROR("Invalid CPSR mode");
		r = arm->core_cache->reg_list + regnum;
	}

	return r;
}
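/* Typical use (sketch, editor's note): arm_reg_current(arm, 14) returns
 * the banked link register for whatever mode was last installed through
 * arm_set_cpsr() -- e.g. "lr_svc" in Supervisor mode, "lr_irq" in IRQ mode.
 */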
static const uint8_t arm_gdb_dummy_fp_value[12];

/**
 * Dummy FPA registers are required to support GDB on ARM.
 * Register packets require eight obsolete FPA register values.
 * Modern ARM cores use Vector Floating Point (VFP), if they
 * have any floating point support.  VFP is not FPA-compatible.
 */
struct reg arm_gdb_dummy_fp_reg =
{
	.name = "GDB dummy FPA register",
	.value = (uint8_t *) arm_gdb_dummy_fp_value,
	.valid = 1,
	.size = 96,
};

static const uint8_t arm_gdb_dummy_fps_value[4];

/**
 * Dummy FPA status registers are required to support GDB on ARM.
 * Register packets require an obsolete FPA status register.
 */
struct reg arm_gdb_dummy_fps_reg =
{
	.name = "GDB dummy FPA status register",
	.value = (uint8_t *) arm_gdb_dummy_fps_value,
	.valid = 1,
	.size = 32,
};

static void arm_gdb_dummy_init(void) __attribute__ ((constructor));

static void arm_gdb_dummy_init(void)
{
	register_init_dummy(&arm_gdb_dummy_fp_reg);
	register_init_dummy(&arm_gdb_dummy_fps_reg);
}
static int armv4_5_get_core_reg(struct reg *reg)
{
	int retval;
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
	if (retval == ERROR_OK) {
		reg->valid = 1;
		reg->dirty = 0;
	}

	return retval;
}
static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;
	struct arm *armv4_5_target = target_to_arm(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Except for CPSR, the "reg" command exposes a writeback model
	 * for the register cache.
	 */
	if (reg == armv4_5_target->cpsr) {
		arm_set_cpsr(armv4_5_target, value);

		/* Older cores need help to be in ARM mode during halt
		 * mode debug, so we clear the J and T bits if we flush.
		 * For newer cores (v6/v7a/v7r) we don't need that, but
		 * it won't hurt since CPSR is always flushed anyway.
		 */
		if (armv4_5_target->core_mode !=
				(enum arm_mode)(value & 0x1f)) {
			LOG_DEBUG("changing ARM core mode to '%s'",
					arm_mode_name(value & 0x1f));
			value &= ~((1 << 24) | (1 << 5));
			armv4_5_target->write_core_reg(target, reg,
					16, ARM_MODE_ANY, value);
		}
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		reg->valid = 1;
	}
	reg->dirty = 1;

	return ERROR_OK;
}

static const struct reg_arch_type arm_reg_type = {
	.get = armv4_5_get_core_reg,
	.set = armv4_5_set_core_reg,
};
struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
{
	int num_regs = ARRAY_SIZE(arm_core_regs);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
	int i;

	if (!cache || !reg_list || !arch_info) {
		free(cache);
		free(reg_list);
		free(arch_info);
		return NULL;
	}

	cache->name = "ARM registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = 0;

	for (i = 0; i < num_regs; i++)
	{
		/* Skip registers this core doesn't expose */
		if (arm_core_regs[i].mode == ARM_MODE_MON
				&& arm->core_type != ARM_MODE_MON)
			continue;

		/* REVISIT handle Cortex-M, which only shadows R13/SP */

		arch_info[i].num = arm_core_regs[i].cookie;
		arch_info[i].mode = arm_core_regs[i].mode;
		arch_info[i].target = target;
		arch_info[i].armv4_5_common = arm;

		reg_list[i].name = (char *) arm_core_regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = &arch_info[i].value;
		reg_list[i].type = &arm_reg_type;
		reg_list[i].arch_info = &arch_info[i];

		cache->num_regs++;
	}

	arm->pc = reg_list + 15;
	arm->cpsr = reg_list + ARMV4_5_CPSR;
	arm->core_cache = cache;
	return cache;
}
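/* General note (editor's sketch, not a statement about specific call sites):
 * the cache built above is what the "arm reg" command and the GDB register
 * code operate on; ARM target drivers normally invoke arm_build_reg_cache()
 * while setting up their per-core arch state.
 */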
int arm_arch_state(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	if (armv4_5->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARM target");
		return ERROR_FAIL;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
			arm_state_strings[armv4_5->core_state],
			debug_reason_name(target),
			arm_mode_name(armv4_5->core_mode),
			buf_get_u32(armv4_5->cpsr->value, 0, 32),
			buf_get_u32(armv4_5->pc->value, 0, 32),
			armv4_5->is_semihosting ? ", semihosting" : "");

	return ERROR_OK;
}

#define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
		cache->reg_list[armv4_5_core_reg_map[mode][num]]
COMMAND_HANDLER(handle_armv4_5_reg_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_arm(target);
	struct reg *regs;

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "error: target must be halted for register accesses");
		return ERROR_FAIL;
	}

	if (armv4_5->core_type != ARM_MODE_ANY)
	{
		command_print(CMD_CTX, "Microcontroller Profile not supported - use standard reg cmd");
		return ERROR_OK;
	}

	if (!is_arm_mode(armv4_5->core_mode))
	{
		LOG_ERROR("not a valid arm core mode - communication failure?");
		return ERROR_FAIL;
	}

	if (!armv4_5->full_context) {
		command_print(CMD_CTX, "error: target doesn't support %s",
				CMD_NAME);
		return ERROR_FAIL;
	}

	regs = armv4_5->core_cache->reg_list;

	for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
		const char *name;
		char *sep = "\n";
		char *shadow = "";

		/* label this bank of registers (or shadows) */
		switch (arm_mode_data[mode].psr) {
		case ARM_MODE_SYS:
			continue;
		case ARM_MODE_USR:
			name = "System and User";
			sep = "";
			break;
		case ARM_MODE_MON:
			if (armv4_5->core_type != ARM_MODE_MON)
				continue;
			/* FALLTHROUGH */
		default:
			name = arm_mode_data[mode].name;
			shadow = "shadow ";
			break;
		}
		command_print(CMD_CTX, "%s%s mode %sregisters",
				sep, name, shadow);

		/* display N rows of up to 4 registers each */
		for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
			char output[80];
			int output_len = 0;

			for (unsigned j = 0; j < 4; j++, i++) {
				uint32_t value;
				struct reg *reg = regs;

				if (i >= arm_mode_data[mode].n_indices)
					break;

				reg += arm_mode_data[mode].indices[i];

				/* REVISIT be smarter about faults... */
				if (!reg->valid)
					armv4_5->full_context(target);

				value = buf_get_u32(reg->value, 0, 32);
				output_len += snprintf(output + output_len,
						sizeof(output) - output_len,
						"%8s: %8.8" PRIx32 " ",
						reg->name, value);
			}
			command_print(CMD_CTX, "%s", output);
		}
	}

	return ERROR_OK;
}
COMMAND_HANDLER(handle_armv4_5_core_state_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_arm(target);

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (armv4_5->core_type == ARM_MODE_THREAD)
	{
		/* armv7m not supported */
		command_print(CMD_CTX, "Unsupported Command");
		return ERROR_OK;
	}

	if (CMD_ARGC > 0)
	{
		if (strcmp(CMD_ARGV[0], "arm") == 0)
		{
			armv4_5->core_state = ARM_STATE_ARM;
		}
		if (strcmp(CMD_ARGV[0], "thumb") == 0)
		{
			armv4_5->core_state = ARM_STATE_THUMB;
		}
	}

	command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);

	return ERROR_OK;
}
COMMAND_HANDLER(handle_arm_disassemble_command)
{
	int retval = ERROR_OK;
	struct target *target = get_current_target(CMD_CTX);

	if (target == NULL) {
		LOG_ERROR("No target selected");
		return ERROR_FAIL;
	}

	struct arm *arm = target_to_arm(target);
	uint32_t address;
	int count = 1;
	int thumb = 0;

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (arm->core_type == ARM_MODE_THREAD)
	{
		/* armv7m is always thumb mode */
		thumb = 1;
	}

	switch (CMD_ARGC) {
	case 3:
		if (strcmp(CMD_ARGV[2], "thumb") != 0)
			goto usage;
		thumb = 1;
		/* FALL THROUGH */
	case 2:
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
		/* FALL THROUGH */
	case 1:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
		if (address & 0x01) {
			if (!thumb) {
				command_print(CMD_CTX, "Disassemble as Thumb");
				thumb = 1;
			}
			address &= ~1;
		}
		break;
	default:
usage:
		command_print(CMD_CTX,
			"usage: arm disassemble <address> [<count> ['thumb']]");
		count = 0;
		retval = ERROR_FAIL;
	}

	while (count-- > 0) {
		struct arm_instruction cur_instruction;

		if (thumb) {
			/* Always use Thumb2 disassembly for best handling
			 * of 32-bit BL/BLX, and to work with newer cores
			 * (some ARMv6, all ARMv7) that use Thumb2.
			 */
			retval = thumb2_opcode(target, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		} else {
			uint32_t opcode;

			retval = target_read_u32(target, address, &opcode);
			if (retval != ERROR_OK)
				break;
			/* return the real error code instead of the result of
			 * a "!= ERROR_OK" comparison
			 */
			retval = arm_evaluate_opcode(opcode, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		}
		command_print(CMD_CTX, "%s", cur_instruction.text);
		address += cur_instruction.instruction_size;
	}

	return retval;
}
static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct command_context *context;
	struct target *target;
	struct arm *arm;
	int retval;

	context = current_command_context(interp);
	assert(context != NULL);

	target = get_current_target(context);
	if (target == NULL) {
		LOG_ERROR("%s: no current target", __func__);
		return JIM_ERR;
	}
	if (!target_was_examined(target)) {
		LOG_ERROR("%s: not yet examined", target_name(target));
		return JIM_ERR;
	}
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		LOG_ERROR("%s: not an ARM", target_name(target));
		return JIM_ERR;
	}

	if ((argc < 6) || (argc > 7)) {
		/* FIXME use the command name to verify # params... */
		LOG_ERROR("%s: wrong number of arguments", __func__);
		return JIM_ERR;
	}

	int cpnum;
	uint32_t op1;
	uint32_t op2;
	uint32_t CRn;
	uint32_t CRm;
	uint32_t value;
	long l;

	/* NOTE: parameter sequence matches ARM instruction set usage:
	 *	MCR	pNUM, op1, rX, CRn, CRm, op2	; write CP from rX
	 *	MRC	pNUM, op1, rX, CRn, CRm, op2	; read CP into rX
	 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
	 */
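	/* Illustrative Tcl usage (editor's example, assuming a CP15-equipped
	 * core; the register choices are just standard CP15 encodings):
	 *	arm mrc 15 0 0 0 0		; read MIDR as the Tcl result
	 *	arm mcr 15 0 1 0 0 $sctlr	; write SCTLR (c1, c0, 0)
	 */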
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;

	value = 0;

	/* FIXME don't assume "mrc" vs "mcr" from the number of params;
	 * that could easily be a typo!  Check both...
	 *
	 * FIXME change the call syntax here ... simplest to just pass
	 * the MRC() or MCR() instruction to be executed.  That will also
	 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
	 * if that's ever needed.
	 */
	if (argc == 7) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK) {
			return retval;
		}
		value = l;

		/* NOTE: parameters reordered! */
		// ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		// ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}
COMMAND_HANDLER(handle_arm_semihosting_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (target == NULL) {
		LOG_ERROR("No target selected");
		return ERROR_FAIL;
	}

	struct arm *arm = target_to_arm(target);

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (!arm->setup_semihosting)
	{
		command_print(CMD_CTX, "semihosting not supported for current target");
		return ERROR_FAIL;
	}

	if (CMD_ARGC > 0)
	{
		int semihosting;

		COMMAND_PARSE_ENABLE(CMD_ARGV[0], semihosting);

		if (!target_was_examined(target))
		{
			LOG_ERROR("Target not examined yet");
			return ERROR_FAIL;
		}

		if (arm->setup_semihosting(target, semihosting) != ERROR_OK) {
			LOG_ERROR("Failed to configure semihosting");
			return ERROR_FAIL;
		}

		/* FIXME never let that "catch" be dropped! */
		arm->is_semihosting = semihosting;
	}

	command_print(CMD_CTX, "semihosting is %s",
			arm->is_semihosting
			? "enabled" : "disabled");

	return ERROR_OK;
}
static const struct command_registration arm_exec_command_handlers[] = {
	{
		.name = "reg",
		.handler = handle_armv4_5_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display ARM core registers",
	},
	{
		.name = "core_state",
		.handler = handle_armv4_5_core_state_command,
		.mode = COMMAND_EXEC,
		.usage = "['arm'|'thumb']",
		.help = "display/change ARM core state",
	},
	{
		.name = "disassemble",
		.handler = handle_arm_disassemble_command,
		.mode = COMMAND_EXEC,
		.usage = "address [count ['thumb']]",
		.help = "disassemble instructions",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = &jim_mcrmrc,
		.help = "write coprocessor register",
		/* parameter order follows the MCR instruction and the
		 * parsing in jim_mcrmrc() above: CRn before CRm, op2 last
		 */
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.jim_handler = &jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		.name = "semihosting",
		.handler = handle_arm_semihosting_command,
		.mode = COMMAND_EXEC,
		.usage = "['enable'|'disable']",
		.help = "activate support for semihosting operations",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration arm_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM command group",
		.chain = arm_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
int arm_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	struct arm *armv4_5 = target_to_arm(target);
	int i;

	if (!is_arm_mode(armv4_5->core_mode))
	{
		LOG_ERROR("not a valid arm core mode - communication failure?");
		return ERROR_FAIL;
	}

	*reg_list_size = 26;
	*reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));

	for (i = 0; i < 16; i++)
		(*reg_list)[i] = arm_reg_current(armv4_5, i);

	for (i = 16; i < 24; i++)
		(*reg_list)[i] = &arm_gdb_dummy_fp_reg;

	(*reg_list)[24] = &arm_gdb_dummy_fps_reg;
	(*reg_list)[25] = armv4_5->cpsr;

	return ERROR_OK;
}
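/* Layout note on arm_get_gdb_reg_list() above: GDB's traditional ARM
 * register packet expects r0-r15, the eight obsolete FPA registers plus
 * the FPA status register, and finally cpsr -- 26 entries in total --
 * which is why the dummy FPA registers are stuffed into slots 16..24.
 */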
/* wait for execution to complete and check exit point */
static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	int retval;
	struct arm *armv4_5 = target_to_arm(target);

	if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
	{
		return retval;
	}
	if (target->state != TARGET_HALTED)
	{
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
		if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
		{
			return retval;
		}
		return ERROR_TARGET_TIMEOUT;
	}

	/* fast exit: ARMv5+ code can use BKPT */
	if (exit_point && buf_get_u32(armv4_5->pc->value, 0, 32) != exit_point)
	{
		LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
			buf_get_u32(armv4_5->pc->value, 0, 32));
		return ERROR_TARGET_TIMEOUT;
	}

	return ERROR_OK;
}
int armv4_5_run_algorithm_inner(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info,
		int (*run_it)(struct target *target, uint32_t exit_point,
				int timeout_ms, void *arch_info))
{
	struct arm *armv4_5 = target_to_arm(target);
	struct arm_algorithm *arm_algorithm_info = arch_info;
	enum arm_state core_state = armv4_5->core_state;
	uint32_t context[17];
	uint32_t cpsr;
	int exit_breakpoint_size = 0;
	int i;
	int retval = ERROR_OK;

	LOG_DEBUG("Running algorithm");

	if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("current target isn't an ARMV4/5 target");
		return ERROR_TARGET_INVALID;
	}

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!is_arm_mode(armv4_5->core_mode))
	{
		LOG_ERROR("not a valid arm core mode - communication failure?");
		return ERROR_FAIL;
	}

	/* armv5 and later can terminate with BKPT instruction; less overhead */
	if (!exit_point && armv4_5->is_armv4)
	{
		LOG_ERROR("ARMv4 target needs HW breakpoint location");
		return ERROR_FAIL;
	}

	/* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
	 * they'll be restored later.
	 */
	for (i = 0; i <= 16; i++)
	{
		struct reg *r;

		r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				arm_algorithm_info->core_mode, i);
		if (!r->valid)
			armv4_5->read_core_reg(target, r, i,
					arm_algorithm_info->core_mode);
		context[i] = buf_get_u32(r->value, 0, 32);
	}
	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);

	for (i = 0; i < num_mem_params; i++)
	{
		if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
		if (!reg)
		{
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if (reg->size != reg_params[i].size)
		{
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	armv4_5->core_state = arm_algorithm_info->core_state;
	if (armv4_5->core_state == ARM_STATE_ARM)
		exit_breakpoint_size = 4;
	else if (armv4_5->core_state == ARM_STATE_THUMB)
		exit_breakpoint_size = 2;
	else
	{
		LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
		return ERROR_INVALID_ARGUMENTS;
	}

	if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
	{
		LOG_DEBUG("setting core_mode: 0x%2.2x",
				arm_algorithm_info->core_mode);
		buf_set_u32(armv4_5->cpsr->value, 0, 5,
				arm_algorithm_info->core_mode);
		armv4_5->cpsr->dirty = 1;
		armv4_5->cpsr->valid = 1;
	}

	/* terminate using a hardware or (ARMv5+) software breakpoint */
	if (exit_point && (retval = breakpoint_add(target, exit_point,
				exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
	{
		LOG_ERROR("can't add HW breakpoint to terminate algorithm");
		return ERROR_TARGET_FAILURE;
	}

	if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
	{
		return retval;
	}
	int retvaltemp;
	retval = run_it(target, exit_point, timeout_ms, arch_info);

	if (exit_point)
		breakpoint_remove(target, exit_point);

	if (retval != ERROR_OK)
		return retval;

	for (i = 0; i < num_mem_params; i++)
	{
		if (mem_params[i].direction != PARAM_OUT)
			if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
			{
				retval = retvaltemp;
			}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		if (reg_params[i].direction != PARAM_OUT)
		{
			struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
			if (!reg)
			{
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			if (reg->size != reg_params[i].size)
			{
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
		}
	}

	/* restore everything we saved before (17 or 18 registers) */
	for (i = 0; i <= 16; i++)
	{
		uint32_t regvalue;
		regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
		if (regvalue != context[i])
		{
			LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
			buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
		}
	}

	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	armv4_5->core_state = core_state;

	return retval;
}
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
}
/**
 * Runs ARM code in the target to calculate a CRC32 checksum.
 *
 */
int arm_checksum_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *checksum)
{
	struct working_area *crc_algorithm;
	struct arm_algorithm armv4_5_info;
	struct arm *armv4_5 = target_to_arm(target);
	struct reg_param reg_params[2];
	int retval;
	uint32_t i;
	uint32_t exit_var = 0;

	/* see contrib/loaders/checksum/armv4_5_crc.s for src */

	static const uint32_t arm_crc_code[] = {
		0xE1A02000,		/* mov		r2, r0 */
		0xE3E00000,		/* mov		r0, #0xffffffff */
		0xE1A03001,		/* mov		r3, r1 */
		0xE3A04000,		/* mov		r4, #0 */
		0xEA00000B,		/* b		ncomp */
		/* nbyte: */
		0xE7D21004,		/* ldrb		r1, [r2, r4] */
		0xE59F7030,		/* ldr		r7, CRC32XOR */
		0xE0200C01,		/* eor		r0, r0, r1, asl 24 */
		0xE3A05000,		/* mov		r5, #0 */
		/* loop: */
		0xE3500000,		/* cmp		r0, #0 */
		0xE1A06080,		/* mov		r6, r0, asl #1 */
		0xE2855001,		/* add		r5, r5, #1 */
		0xE1A00006,		/* mov		r0, r6 */
		0xB0260007,		/* eorlt	r0, r6, r7 */
		0xE3550008,		/* cmp		r5, #8 */
		0x1AFFFFF8,		/* bne		loop */
		0xE2844001,		/* add		r4, r4, #1 */
		/* ncomp: */
		0xE1540003,		/* cmp		r4, r3 */
		0x1AFFFFF1,		/* bne		nbyte */
		/* end: */
		0xe1200070,		/* bkpt		#0 */
		/* CRC32XOR: */
		0x04C11DB7		/* .word 0x04C11DB7 */
	};
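	/* Summary of the loader above (editor's note): it computes the
	 * standard CRC-32 (polynomial 0x04C11DB7, MSB-first, initial value
	 * 0xFFFFFFFF, no final inversion) one bit at a time, trading speed
	 * for size; the host reads the result back from r0 below.
	 */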
	retval = target_alloc_working_area(target,
			sizeof(arm_crc_code), &crc_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
		retval = target_write_u32(target,
				crc_algorithm->address + i * sizeof(uint32_t),
				arm_crc_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARM_COMMON_MAGIC;
	armv4_5_info.core_mode = ARM_MODE_SVC;
	armv4_5_info.core_state = ARM_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);

	buf_set_u32(reg_params[0].value, 0, 32, address);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	/* 20 second timeout/megabyte */
	int timeout = 20000 * (1 + (count / (1024 * 1024)));

	/* armv4 must exit using a hardware breakpoint */
	if (armv4_5->is_armv4)
		exit_var = crc_algorithm->address + sizeof(arm_crc_code) - 8;

	retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
			crc_algorithm->address,
			exit_var,
			timeout, &armv4_5_info);
	if (retval != ERROR_OK) {
		LOG_ERROR("error executing ARM crc algorithm");
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		target_free_working_area(target, crc_algorithm);
		return retval;
	}

	*checksum = buf_get_u32(reg_params[0].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);

	target_free_working_area(target, crc_algorithm);

	return ERROR_OK;
}
/**
 * Runs ARM code in the target to check whether a memory block holds
 * all ones.  NOR flash which has been erased, and thus may be written,
 * holds all ones.
 *
 */
int arm_blank_check_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *blank)
{
	struct working_area *check_algorithm;
	struct reg_param reg_params[3];
	struct arm_algorithm armv4_5_info;
	struct arm *armv4_5 = target_to_arm(target);
	int retval;
	uint32_t i;
	uint32_t exit_var = 0;

	static const uint32_t check_code[] = {
		/* loop: */
		0xe4d03001,		/* ldrb r3, [r0], #1 */
		0xe0022003,		/* and r2, r2, r3 */
		0xe2511001,		/* subs r1, r1, #1 */
		0x1afffffb,		/* bne loop */
		/* end: */
		0xe1200070,		/* bkpt #0 */
	};
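	/* How the loader is used (editor's note): r2 starts out as 0xff and
	 * accumulates the AND of every byte read, so the value returned
	 * through *blank below is 0xff only if the whole block is erased
	 * (all ones).
	 */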
	/* make sure we have a working area */
	retval = target_alloc_working_area(target,
			sizeof(check_code), &check_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(check_code); i++) {
		retval = target_write_u32(target,
				check_algorithm->address
					+ i * sizeof(uint32_t),
				check_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARM_COMMON_MAGIC;
	armv4_5_info.core_mode = ARM_MODE_SVC;
	armv4_5_info.core_state = ARM_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, address);

	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[2].value, 0, 32, 0xff);

	/* armv4 must exit using a hardware breakpoint */
	if (armv4_5->is_armv4)
		exit_var = check_algorithm->address + sizeof(check_code) - 4;

	retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
			check_algorithm->address,
			exit_var,
			10000, &armv4_5_info);
	if (retval != ERROR_OK) {
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		destroy_reg_param(&reg_params[2]);
		target_free_working_area(target, check_algorithm);
		return retval;
	}

	*blank = buf_get_u32(reg_params[2].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	destroy_reg_param(&reg_params[2]);

	target_free_working_area(target, check_algorithm);

	return ERROR_OK;
}
static int arm_full_context(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);
	unsigned num_regs = armv4_5->core_cache->num_regs;
	struct reg *reg = armv4_5->core_cache->reg_list;
	int retval = ERROR_OK;

	for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
		if (reg->valid)
			continue;
		retval = armv4_5_get_core_reg(reg);
	}
	return retval;
}

static int arm_default_mrc(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t *value)
{
	LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
	return ERROR_FAIL;
}

static int arm_default_mcr(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t value)
{
	LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
	return ERROR_FAIL;
}
int arm_init_arch_info(struct target *target, struct arm *armv4_5)
{
	target->arch_info = armv4_5;
	armv4_5->target = target;

	armv4_5->common_magic = ARM_COMMON_MAGIC;

	/* core_type may be overridden by subtype logic */
	if (armv4_5->core_type != ARM_MODE_THREAD) {
		armv4_5->core_type = ARM_MODE_ANY;
		arm_set_cpsr(armv4_5, ARM_MODE_USR);
	}

	/* default full_context() has no core-specific optimizations */
	if (!armv4_5->full_context && armv4_5->read_core_reg)
		armv4_5->full_context = arm_full_context;

	if (!armv4_5->mrc)
		armv4_5->mrc = arm_default_mrc;
	if (!armv4_5->mcr)
		armv4_5->mcr = arm_default_mcr;

	return ERROR_OK;
}
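/* Typical flow (editor's sketch, not a guarantee about specific drivers):
 * a target driver embeds "struct arm" in its own arch-info structure,
 * fills in read_core_reg/write_core_reg and any mcr/mrc overrides, then
 * calls arm_init_arch_info() followed by arm_build_reg_cache() so the
 * generic code above can service register, mcr/mrc, and algorithm requests.
 */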