/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Oyvind Harboe                                   *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "armv4_5.h"
#include "breakpoints.h"
#include "arm_disassembler.h"
#include <helper/binarybuffer.h>
#include "algorithm.h"
#include "register.h"
/* offsets into armv4_5 core register cache */
enum {
	ARMV4_5_SPSR_FIQ = 32,
	ARMV4_5_SPSR_IRQ = 33,
	ARMV4_5_SPSR_SVC = 34,
	ARMV4_5_SPSR_ABT = 35,
	ARMV4_5_SPSR_UND = 36,
	ARM_SPSR_MON = 39,
};
static const uint8_t arm_usr_indices[17] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
};

static const uint8_t arm_fiq_indices[8] = {
	16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
};

static const uint8_t arm_irq_indices[3] = {
	23, 24, ARMV4_5_SPSR_IRQ,
};

static const uint8_t arm_svc_indices[3] = {
	25, 26, ARMV4_5_SPSR_SVC,
};

static const uint8_t arm_abt_indices[3] = {
	27, 28, ARMV4_5_SPSR_ABT,
};

static const uint8_t arm_und_indices[3] = {
	29, 30, ARMV4_5_SPSR_UND,
};
static const uint8_t arm_mon_indices[3] = {
	37, 38, ARM_SPSR_MON,
};
static const struct {
	const char *name;
	unsigned short psr;
	/* For user and system modes, these list indices for all registers.
	 * Otherwise they're just indices for the shadow registers and SPSR.
	 */
	unsigned short n_indices;
	const uint8_t *indices;
} arm_mode_data[] = {
	/* Seven modes are standard from ARM7 on. "System" and "User" share
	 * the same registers; other modes shadow from 3 to 8 registers.
	 */
	{
		.name = "User",
		.psr = ARM_MODE_USR,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	{
		.name = "FIQ",
		.psr = ARM_MODE_FIQ,
		.n_indices = ARRAY_SIZE(arm_fiq_indices),
		.indices = arm_fiq_indices,
	},
	{
		.name = "Supervisor",
		.psr = ARM_MODE_SVC,
		.n_indices = ARRAY_SIZE(arm_svc_indices),
		.indices = arm_svc_indices,
	},
	{
		.name = "Abort",
		.psr = ARM_MODE_ABT,
		.n_indices = ARRAY_SIZE(arm_abt_indices),
		.indices = arm_abt_indices,
	},
	{
		.name = "IRQ",
		.psr = ARM_MODE_IRQ,
		.n_indices = ARRAY_SIZE(arm_irq_indices),
		.indices = arm_irq_indices,
	},
	{
		.name = "Undefined instruction",
		.psr = ARM_MODE_UND,
		.n_indices = ARRAY_SIZE(arm_und_indices),
		.indices = arm_und_indices,
	},
	{
		.name = "System",
		.psr = ARM_MODE_SYS,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	/* TrustZone "Security Extensions" add a secure monitor mode.
	 * This is distinct from a "debug monitor" which can support
	 * non-halting debug, in conjunction with some debuggers.
	 */
	{
		.name = "Secure Monitor",
		.psr = ARM_MODE_MON,
		.n_indices = ARRAY_SIZE(arm_mon_indices),
		.indices = arm_mon_indices,
	},
};
/** Map PSR mode bits to the name of an ARM processor operating mode. */
const char *arm_mode_name(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return arm_mode_data[i].name;
	}
	LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
	return "UNRECOGNIZED";
}
/** Return true iff the parameter denotes a valid ARM processor mode. */
bool is_arm_mode(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return true;
	}
	return false;
}
/** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
int arm_mode_to_number(enum arm_mode mode)
{
	switch (mode) {
	case ARM_MODE_ANY:
		/* map MODE_ANY to user mode */
	case ARM_MODE_USR:
		return 0;
	case ARM_MODE_FIQ:
		return 1;
	case ARM_MODE_IRQ:
		return 2;
	case ARM_MODE_SVC:
		return 3;
	case ARM_MODE_ABT:
		return 4;
	case ARM_MODE_UND:
		return 5;
	case ARM_MODE_SYS:
		return 6;
	case ARM_MODE_MON:
		return 7;
	default:
		LOG_ERROR("invalid mode value encountered %d", mode);
		return -1;
	}
}
/** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
enum arm_mode armv4_5_number_to_mode(int number)
{
	switch (number) {
	case 0:
		return ARM_MODE_USR;
	case 1:
		return ARM_MODE_FIQ;
	case 2:
		return ARM_MODE_IRQ;
	case 3:
		return ARM_MODE_SVC;
	case 4:
		return ARM_MODE_ABT;
	case 5:
		return ARM_MODE_UND;
	case 6:
		return ARM_MODE_SYS;
	case 7:
		return ARM_MODE_MON;
	default:
		LOG_ERROR("mode index out of bounds %d", number);
		return ARM_MODE_ANY;
	}
}
static const char *arm_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};
/* Templates for ARM core registers.
 *
 * NOTE: offsets in this table are coupled to the arm_mode_data
 * table above, the armv4_5_core_reg_map array below, and also to
 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
 */
static const struct {
	/* The name is used for e.g. the "regs" command. */
	const char *name;

	/* The {cookie, mode} tuple uniquely identifies one register.
	 * In a given mode, cookies 0..15 map to registers R0..R15,
	 * with R13..R15 usually called SP, LR, PC.
	 *
	 * MODE_ANY is used as *input* to the mapping, and indicates
	 * various special cases (sigh) and errors.
	 *
	 * Cookie 16 is (currently) confusing, since it indicates
	 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
	 * (Exception modes have both CPSR and SPSR registers ...)
	 */
	unsigned cookie;
	enum arm_mode mode;
} arm_core_regs[] = {
	/* IMPORTANT: we guarantee that the first eight cached registers
	 * correspond to r0..r7, and the one at index 15 to the PC, so that
	 * callers don't need to map them.
	 */
	{ .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
	{ .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
	{ .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
	{ .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
	{ .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
	{ .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
	{ .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
	{ .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },

	/* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
	 * them as MODE_ANY creates special cases.  (ANY means
	 * "not mapped" elsewhere; here it's "everything but FIQ".)
	 */
	{ .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
	{ .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
	{ .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
	{ .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
	{ .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },

	/* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
	{ .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
	{ .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },

	/* guaranteed to be at index 15 */
	{ .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },

	{ .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
	{ .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
	{ .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
	{ .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
	{ .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
	{ .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
	{ .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },

	{ .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
	{ .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },

	{ .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
	{ .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },

	{ .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
	{ .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },

	{ .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
	{ .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
	{ .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
	{ .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
	{ .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
	{ .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },

	{ .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
	{ .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
	{ .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
};
/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};
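
/* Worked example (for illustration): with the tables above, banked r13 in
 * FIQ mode resolves to cache index armv4_5_core_reg_map[1][13] == 21, and
 * arm_core_regs[21] is "sp_fiq"; the same lookup in User or System mode
 * yields index 13, "sp_usr".
 */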
/**
 * Configures host-side ARM records to reflect the specified CPSR.
 * Later, code can use arm_reg_current() to map register numbers
 * according to how they are exposed by this mode.
 */
void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
{
	enum arm_mode mode = cpsr & 0x1f;
	int num;

	/* NOTE: this may be called very early, before the register
	 * cache is set up.  We can't defend against many errors, in
	 * particular against CPSRs that aren't valid *here* ...
	 */
	if (arm->cpsr) {
		buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
		arm->cpsr->valid = 1;
		arm->cpsr->dirty = 0;
	}

	arm->core_mode = mode;

	/* mode_to_number() warned; set up a somewhat-sane mapping */
	num = arm_mode_to_number(mode);
	if (num < 0) {
		mode = ARM_MODE_USR;
		num = 0;
	}

	arm->map = &armv4_5_core_reg_map[num][0];
	arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
			? NULL
			: arm->core_cache->reg_list + arm->map[16];

	/* Older ARMs won't have the J bit */
	enum arm_state state;

	if (cpsr & (1 << 5)) {	/* T */
		if (cpsr & (1 << 24)) {	/* J */
			LOG_WARNING("ThumbEE -- incomplete support");
			state = ARM_STATE_THUMB_EE;
		} else
			state = ARM_STATE_THUMB;
	} else {
		if (cpsr & (1 << 24)) {	/* J */
			LOG_ERROR("Jazelle state handling is BROKEN!");
			state = ARM_STATE_JAZELLE;
		} else
			state = ARM_STATE_ARM;
	}
	arm->core_state = state;

	LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
			arm_mode_name(mode),
			arm_state_strings[arm->core_state]);
}
/**
 * Returns handle to the register currently mapped to a given number.
 * Someone must have called arm_set_cpsr() before.
 *
 * \param arm This core's state and registers are used.
 * \param regnum From 0..15 corresponding to R0..R14 and PC.
 * Note that R0..R7 don't require mapping; you may access those
 * as the first eight entries in the register cache.  Likewise
 * R15 (PC) doesn't need mapping; you may also access it directly.
 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
 * CPSR (arm->cpsr) is also not mapped.
 */
struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	if (regnum > 16)
		return NULL;

	r = arm->core_cache->reg_list + arm->map[regnum];

	/* e.g. invalid CPSR said "secure monitor" mode on a core
	 * that doesn't support it...
	 */
	if (r->name == NULL) {
		LOG_ERROR("Invalid CPSR mode");
		r = arm->core_cache->reg_list + regnum;
	}

	return r;
}
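
/* Usage sketch (for illustration): once arm_set_cpsr() has selected the
 * current mode, a caller can reach the banked link register like this,
 * assuming the cache entry has already been read from the target:
 *
 *	struct reg *lr = arm_reg_current(arm, 14);
 *	uint32_t return_address = buf_get_u32(lr->value, 0, 32);
 *
 * R0..R7 and the PC, by contrast, may be taken straight from
 * arm->core_cache->reg_list without going through the map.
 */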
static const uint8_t arm_gdb_dummy_fp_value[12];

/**
 * Dummy FPA registers are required to support GDB on ARM.
 * Register packets require eight obsolete FPA register values.
 * Modern ARM cores use Vector Floating Point (VFP), if they
 * have any floating point support.  VFP is not FPA-compatible.
 */
struct reg arm_gdb_dummy_fp_reg =
{
	.name = "GDB dummy FPA register",
	.value = (uint8_t *) arm_gdb_dummy_fp_value,
	.valid = 1,
	.size = 96,
};

static const uint8_t arm_gdb_dummy_fps_value[4];

/**
 * Dummy FPA status registers are required to support GDB on ARM.
 * Register packets require an obsolete FPA status register.
 */
struct reg arm_gdb_dummy_fps_reg =
{
	.name = "GDB dummy FPA status register",
	.value = (uint8_t *) arm_gdb_dummy_fps_value,
	.valid = 1,
	.size = 32,
};

static void arm_gdb_dummy_init(void) __attribute__ ((constructor));

static void arm_gdb_dummy_init(void)
{
	register_init_dummy(&arm_gdb_dummy_fp_reg);
	register_init_dummy(&arm_gdb_dummy_fps_reg);
}
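
/* For reference: with these dummies in place, arm_get_gdb_reg_list() below
 * hands GDB the classic 26-entry ARM layout: r0..r15 at indices 0..15,
 * eight fake FPA registers at 16..23, the fake FPA status register at 24,
 * and CPSR at 25.
 */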
static int armv4_5_get_core_reg(struct reg *reg)
{
	int retval;
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
	if (retval == ERROR_OK) {
		reg->valid = 1;
		reg->dirty = 0;
	}

	return retval;
}
static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;
	struct arm *armv4_5_target = target_to_arm(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Except for CPSR, the "reg" command exposes a writeback model
	 * for the register cache.
	 */
	if (reg == armv4_5_target->cpsr) {
		arm_set_cpsr(armv4_5_target, value);

		/* Older cores need help to be in ARM mode during halt
		 * mode debug, so we clear the J and T bits if we flush.
		 * For newer cores (v6/v7a/v7r) we don't need that, but
		 * it won't hurt since CPSR is always flushed anyway.
		 */
		if (armv4_5_target->core_mode !=
				(enum arm_mode)(value & 0x1f)) {
			LOG_DEBUG("changing ARM core mode to '%s'",
					arm_mode_name(value & 0x1f));
			value &= ~((1 << 24) | (1 << 5));
			armv4_5_target->write_core_reg(target, reg,
					16, ARM_MODE_ANY, value);
		}
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		reg->valid = 1;
	}
	reg->dirty = 1;

	return ERROR_OK;
}
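
/* Note on the writeback model mentioned above: for everything except CPSR,
 * a register write only updates the cached value and marks it dirty; the
 * dirty value reaches the core later (typically when the target is resumed),
 * not in this setter.
 */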
static const struct reg_arch_type arm_reg_type = {
	.get = armv4_5_get_core_reg,
	.set = armv4_5_set_core_reg,
};
struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
{
	int num_regs = ARRAY_SIZE(arm_core_regs);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
	int i;

	if (!cache || !reg_list || !arch_info) {
		free(cache);
		free(reg_list);
		free(arch_info);
		return NULL;
	}

	cache->name = "ARM registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = 0;

	for (i = 0; i < num_regs; i++)
	{
		/* Skip registers this core doesn't expose */
		if (arm_core_regs[i].mode == ARM_MODE_MON
				&& arm->core_type != ARM_MODE_MON)
			continue;

		/* REVISIT handle Cortex-M, which only shadows R13/SP */

		arch_info[i].num = arm_core_regs[i].cookie;
		arch_info[i].mode = arm_core_regs[i].mode;
		arch_info[i].target = target;
		arch_info[i].armv4_5_common = arm;

		reg_list[i].name = (char *) arm_core_regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = &arch_info[i].value;
		reg_list[i].type = &arm_reg_type;
		reg_list[i].arch_info = &arch_info[i];

		cache->num_regs++;
	}

	arm->pc = reg_list + 15;
	arm->cpsr = reg_list + ARMV4_5_CPSR;
	arm->core_cache = cache;

	return cache;
}
int arm_arch_state(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	if (armv4_5->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARM target");
		return ERROR_FAIL;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
			arm_state_strings[armv4_5->core_state],
			debug_reason_name(target),
			arm_mode_name(armv4_5->core_mode),
			buf_get_u32(armv4_5->cpsr->value, 0, 32),
			buf_get_u32(armv4_5->pc->value, 0, 32),
			armv4_5->is_semihosting ? ", semihosting" : "");

	return ERROR_OK;
}
#define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
		cache->reg_list[armv4_5_core_reg_map[mode][num]]
COMMAND_HANDLER(handle_armv4_5_reg_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_arm(target);
	struct reg *regs;
	unsigned num_regs;

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "error: target must be halted for register accesses");
		return ERROR_FAIL;
	}

	if (armv4_5->core_type != ARM_MODE_ANY)
	{
		command_print(CMD_CTX, "Microcontroller Profile not supported - use standard reg cmd");
		return ERROR_FAIL;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	if (!armv4_5->full_context) {
		command_print(CMD_CTX, "error: target doesn't support %s",
				CMD_NAME);
		return ERROR_FAIL;
	}

	num_regs = armv4_5->core_cache->num_regs;
	regs = armv4_5->core_cache->reg_list;

	for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
		const char *name;
		char *sep = "\n";
		char *shadow = "";

		/* label this bank of registers (or shadows) */
		switch (arm_mode_data[mode].psr) {
		case ARM_MODE_SYS:
			continue;
		case ARM_MODE_USR:
			name = "System and User";
			sep = "";
			break;
		case ARM_MODE_MON:
			if (armv4_5->core_type != ARM_MODE_MON)
				continue;
			/* FALLTHROUGH */
		default:
			name = arm_mode_data[mode].name;
			shadow = "shadow ";
			break;
		}
		command_print(CMD_CTX, "%s%s mode %sregisters",
				sep, name, shadow);

		/* display N rows of up to 4 registers each */
		for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
			char output[80];
			int output_len = 0;

			for (unsigned j = 0; j < 4; j++, i++) {
				uint32_t value;
				struct reg *reg = regs;

				if (i >= arm_mode_data[mode].n_indices)
					break;
				reg += arm_mode_data[mode].indices[i];

				/* REVISIT be smarter about faults... */
				if (!reg->valid)
					armv4_5->full_context(target);
				value = buf_get_u32(reg->value, 0, 32);

				output_len += snprintf(output + output_len,
						sizeof(output) - output_len,
						"%8s: %8.8" PRIx32 " ",
						reg->name, value);
			}
			command_print(CMD_CTX, "%s", output);
		}
	}

	return ERROR_OK;
}
COMMAND_HANDLER(handle_armv4_5_core_state_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_arm(target);

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (armv4_5->core_type == ARM_MODE_THREAD)
	{
		/* armv7m not supported */
		command_print(CMD_CTX, "Unsupported Command");
		return ERROR_OK;
	}

	if (CMD_ARGC > 0)
	{
		if (strcmp(CMD_ARGV[0], "arm") == 0)
		{
			armv4_5->core_state = ARM_STATE_ARM;
		}
		if (strcmp(CMD_ARGV[0], "thumb") == 0)
		{
			armv4_5->core_state = ARM_STATE_THUMB;
		}
	}

	command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);

	return ERROR_OK;
}
COMMAND_HANDLER(handle_arm_disassemble_command)
{
	int retval = ERROR_OK;
	struct target *target = get_current_target(CMD_CTX);
	struct arm *arm = target ? target_to_arm(target) : NULL;
	uint32_t address;
	int count = 1;
	int thumb = 0;

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (arm->core_type == ARM_MODE_THREAD)
	{
		/* armv7m is always thumb mode */
		thumb = 1;
	}

	switch (CMD_ARGC) {
	case 3:
		if (strcmp(CMD_ARGV[2], "thumb") != 0)
			goto usage;
		thumb = 1;
		/* FALL THROUGH */
	case 2:
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
		/* FALL THROUGH */
	case 1:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
		if (address & 0x01) {
			if (!thumb)
				command_print(CMD_CTX, "Disassemble as Thumb");
			thumb = 1;
			address &= ~1;
		}
		break;
	default:
usage:
		command_print(CMD_CTX,
			"usage: arm disassemble <address> [<count> ['thumb']]");
		count = 0;
		retval = ERROR_FAIL;
	}

	while (count-- > 0) {
		struct arm_instruction cur_instruction;

		if (thumb) {
			/* Always use Thumb2 disassembly for best handling
			 * of 32-bit BL/BLX, and to work with newer cores
			 * (some ARMv6, all ARMv7) that use Thumb2.
			 */
			retval = thumb2_opcode(target, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		} else {
			uint32_t opcode;

			retval = target_read_u32(target, address, &opcode);
			if (retval != ERROR_OK)
				break;
			retval = arm_evaluate_opcode(opcode, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		}
		command_print(CMD_CTX, "%s", cur_instruction.text);
		address += cur_instruction.instruction_size;
	}

	return retval;
}
static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct command_context *context;
	struct target *target;
	struct arm *arm;
	int retval;

	context = Jim_GetAssocData(interp, "context");
	if (context == NULL) {
		LOG_ERROR("%s: no command context", __func__);
		return JIM_ERR;
	}
	target = get_current_target(context);
	if (target == NULL) {
		LOG_ERROR("%s: no current target", __func__);
		return JIM_ERR;
	}
	if (!target_was_examined(target)) {
		LOG_ERROR("%s: not yet examined", target_name(target));
		return JIM_ERR;
	}
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		LOG_ERROR("%s: not an ARM", target_name(target));
		return JIM_ERR;
	}

	if ((argc < 6) || (argc > 7)) {
		/* FIXME use the command name to verify # params... */
		LOG_ERROR("%s: wrong number of arguments", __func__);
		return JIM_ERR;
	}

	int cpnum;
	uint32_t op1;
	uint32_t op2;
	uint32_t CRn;
	uint32_t CRm;
	uint32_t value;
	long l;

	/* NOTE: parameter sequence matches ARM instruction set usage:
	 *	MCR	pNUM, op1, rX, CRn, CRm, op2	; write CP from rX
	 *	MRC	pNUM, op1, rX, CRn, CRm, op2	; read CP into rX
	 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
	 */
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;

	value = 0;

	/* FIXME don't assume "mrc" vs "mcr" from the number of params;
	 * that could easily be a typo!  Check both...
	 *
	 * FIXME change the call syntax here ... simplest to just pass
	 * the MRC() or MCR() instruction to be executed.  That will also
	 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
	 * if that's ever needed.
	 */
	if (argc == 7) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK) {
			return retval;
		}
		value = l;

		/* NOTE: parameters reordered! */
		// ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		// ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}
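
/* Illustration: the Tcl parameters mirror the assembler operand order with
 * rX dropped, so reading CP15 c0,c0,0 (the Main ID register,
 * "MRC p15, 0, rX, c0, c0, 0") becomes something like
 *
 *	set midr [arm mrc 15 0 0 0 0]
 *
 * and the register value comes back as the command's result instead of
 * landing in a core register.
 */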
COMMAND_HANDLER(handle_arm_semihosting_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *arm = target ? target_to_arm(target) : NULL;

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (!arm->setup_semihosting)
	{
		command_print(CMD_CTX, "semihosting not supported for current target");
		return ERROR_FAIL;
	}

	if (CMD_ARGC > 0)
	{
		int semihosting;

		COMMAND_PARSE_ENABLE(CMD_ARGV[0], semihosting);

		if (!target_was_examined(target))
		{
			LOG_ERROR("Target not examined yet");
			return ERROR_FAIL;
		}

		if (arm->setup_semihosting(target, semihosting) != ERROR_OK) {
			LOG_ERROR("Failed to configure semihosting");
			return ERROR_FAIL;
		}

		/* FIXME never let that "catch" be dropped! */
		arm->is_semihosting = semihosting;
	}

	command_print(CMD_CTX, "semihosting is %s",
			arm->is_semihosting
			? "enabled" : "disabled");

	return ERROR_OK;
}
static const struct command_registration arm_exec_command_handlers[] = {
	{
		.name = "reg",
		.handler = handle_armv4_5_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display ARM core registers",
	},
	{
		.name = "core_state",
		.handler = handle_armv4_5_core_state_command,
		.mode = COMMAND_EXEC,
		.usage = "['arm'|'thumb']",
		.help = "display/change ARM core state",
	},
	{
		.name = "disassemble",
		.handler = handle_arm_disassemble_command,
		.mode = COMMAND_EXEC,
		.usage = "address [count ['thumb']]",
		.help = "disassemble instructions",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = &jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = &jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		.name = "semihosting",
		.handler = handle_arm_semihosting_command,
		.mode = COMMAND_EXEC,
		.usage = "['enable'|'disable']",
		.help = "activate support for semihosting operations",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration arm_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM command group",
		.chain = arm_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
int arm_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	struct arm *armv4_5 = target_to_arm(target);
	int i;

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	*reg_list_size = 26;
	*reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));

	for (i = 0; i < 16; i++)
		(*reg_list)[i] = arm_reg_current(armv4_5, i);

	for (i = 16; i < 24; i++)
		(*reg_list)[i] = &arm_gdb_dummy_fp_reg;

	(*reg_list)[24] = &arm_gdb_dummy_fps_reg;
	(*reg_list)[25] = armv4_5->cpsr;

	return ERROR_OK;
}
/* wait for execution to complete and check exit point */
static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	int retval;
	struct arm *armv4_5 = target_to_arm(target);

	if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
	{
		return retval;
	}
	if (target->state != TARGET_HALTED)
	{
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
		if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
		{
			return retval;
		}
		return ERROR_TARGET_TIMEOUT;
	}

	/* fast exit: ARMv5+ code can use BKPT */
	if (exit_point && buf_get_u32(armv4_5->pc->value, 0, 32) != exit_point)
	{
		LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
			buf_get_u32(armv4_5->pc->value, 0, 32));
		return ERROR_TARGET_TIMEOUT;
	}

	return ERROR_OK;
}
int armv4_5_run_algorithm_inner(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info,
		int (*run_it)(struct target *target, uint32_t exit_point,
				int timeout_ms, void *arch_info))
{
	struct arm *armv4_5 = target_to_arm(target);
	struct arm_algorithm *arm_algorithm_info = arch_info;
	enum arm_state core_state = armv4_5->core_state;
	uint32_t context[17];
	uint32_t cpsr;
	int exit_breakpoint_size = 0;
	int i;
	int retvaltemp;
	int retval = ERROR_OK;

	LOG_DEBUG("Running algorithm");

	if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("current target isn't an ARMV4/5 target");
		return ERROR_TARGET_INVALID;
	}

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	/* armv5 and later can terminate with BKPT instruction; less overhead */
	if (!exit_point && armv4_5->is_armv4)
	{
		LOG_ERROR("ARMv4 target needs HW breakpoint location");
		return ERROR_FAIL;
	}

	/* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
	 * they'll be restored later.
	 */
	for (i = 0; i <= 16; i++)
	{
		struct reg *r;

		r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				arm_algorithm_info->core_mode, i);
		if (!r->valid)
			armv4_5->read_core_reg(target, r, i,
					arm_algorithm_info->core_mode);
		context[i] = buf_get_u32(r->value, 0, 32);
	}
	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);

	for (i = 0; i < num_mem_params; i++)
	{
		if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
		if (!reg)
		{
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if (reg->size != reg_params[i].size)
		{
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	armv4_5->core_state = arm_algorithm_info->core_state;
	if (armv4_5->core_state == ARM_STATE_ARM)
		exit_breakpoint_size = 4;
	else if (armv4_5->core_state == ARM_STATE_THUMB)
		exit_breakpoint_size = 2;
	else
	{
		LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
		return ERROR_INVALID_ARGUMENTS;
	}

	if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
	{
		LOG_DEBUG("setting core_mode: 0x%2.2x",
				arm_algorithm_info->core_mode);
		buf_set_u32(armv4_5->cpsr->value, 0, 5,
				arm_algorithm_info->core_mode);
		armv4_5->cpsr->dirty = 1;
		armv4_5->cpsr->valid = 1;
	}

	/* terminate using a hardware or (ARMv5+) software breakpoint */
	if (exit_point && (retval = breakpoint_add(target, exit_point,
				exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
	{
		LOG_ERROR("can't add HW breakpoint to terminate algorithm");
		return ERROR_TARGET_FAILURE;
	}

	if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
	{
		return retval;
	}

	retval = run_it(target, exit_point, timeout_ms, arch_info);

	if (exit_point)
		breakpoint_remove(target, exit_point);

	if (retval != ERROR_OK)
		return retval;

	for (i = 0; i < num_mem_params; i++)
	{
		if (mem_params[i].direction != PARAM_OUT)
			if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
			{
				retval = retvaltemp;
			}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		if (reg_params[i].direction != PARAM_OUT)
		{
			struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
			if (!reg)
			{
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			if (reg->size != reg_params[i].size)
			{
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
		}
	}

	/* restore everything we saved before (17 or 18 registers) */
	for (i = 0; i <= 16; i++)
	{
		uint32_t regvalue;

		regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
		if (regvalue != context[i])
		{
			LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
			buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
		}
	}

	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	armv4_5->core_state = core_state;

	return retval;
}
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
}
/**
 * Runs ARM code in the target to calculate a CRC32 checksum.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_checksum_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *checksum)
{
	struct working_area *crc_algorithm;
	struct arm_algorithm armv4_5_info;
	struct reg_param reg_params[2];
	int retval;
	uint32_t i;

	static const uint32_t arm_crc_code[] = {
		0xE1A02000,	/* mov		r2, r0 */
		0xE3E00000,	/* mov		r0, #0xffffffff */
		0xE1A03001,	/* mov		r3, r1 */
		0xE3A04000,	/* mov		r4, #0 */
		0xEA00000B,	/* b		ncomp */
		/* nbyte: */
		0xE7D21004,	/* ldrb		r1, [r2, r4] */
		0xE59F7030,	/* ldr		r7, CRC32XOR */
		0xE0200C01,	/* eor		r0, r0, r1, asl 24 */
		0xE3A05000,	/* mov		r5, #0 */
		/* loop: */
		0xE3500000,	/* cmp		r0, #0 */
		0xE1A06080,	/* mov		r6, r0, asl #1 */
		0xE2855001,	/* add		r5, r5, #1 */
		0xE1A00006,	/* mov		r0, r6 */
		0xB0260007,	/* eorlt	r0, r6, r7 */
		0xE3550008,	/* cmp		r5, #8 */
		0x1AFFFFF8,	/* bne		loop */
		0xE2844001,	/* add		r4, r4, #1 */
		/* ncomp: */
		0xE1540003,	/* cmp		r4, r3 */
		0x1AFFFFF1,	/* bne		nbyte */
		/* end: */
		0xEAFFFFFE,	/* b		end */
		/* CRC32XOR: */
		0x04C11DB7	/* .word 0x04C11DB7 */
	};
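
	/* What the stub above computes, for reference: a bitwise CRC-32 with
	 * polynomial 0x04C11DB7, initial value 0xffffffff, each byte fed in
	 * MSB-first, no reflection and no final XOR.  A host-side sketch of
	 * the same loop (buf/len standing in for the target memory and count):
	 *
	 *	uint32_t crc = 0xffffffff;
	 *	for (uint32_t n = 0; n < len; n++) {
	 *		crc ^= (uint32_t) buf[n] << 24;
	 *		for (int b = 0; b < 8; b++)
	 *			crc = (crc & 0x80000000)
	 *				? (crc << 1) ^ 0x04C11DB7
	 *				: (crc << 1);
	 *	}
	 */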
	retval = target_alloc_working_area(target,
			sizeof(arm_crc_code), &crc_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
		retval = target_write_u32(target,
				crc_algorithm->address + i * sizeof(uint32_t),
				arm_crc_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARM_COMMON_MAGIC;
	armv4_5_info.core_mode = ARM_MODE_SVC;
	armv4_5_info.core_state = ARM_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);

	buf_set_u32(reg_params[0].value, 0, 32, address);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	/* 20 second timeout/megabyte */
	int timeout = 20000 * (1 + (count / (1024 * 1024)));

	retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
			crc_algorithm->address,
			crc_algorithm->address + sizeof(arm_crc_code) - 8,
			timeout, &armv4_5_info);
	if (retval != ERROR_OK) {
		LOG_ERROR("error executing ARM crc algorithm");
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		target_free_working_area(target, crc_algorithm);
		return retval;
	}

	*checksum = buf_get_u32(reg_params[0].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);

	target_free_working_area(target, crc_algorithm);

	return ERROR_OK;
}
/**
 * Runs ARM code in the target to check whether a memory block holds
 * all ones.  NOR flash which has been erased, and thus may be written,
 * holds all ones.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_blank_check_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *blank)
{
	struct working_area *check_algorithm;
	struct reg_param reg_params[3];
	struct arm_algorithm armv4_5_info;
	int retval;
	uint32_t i;

	static const uint32_t check_code[] = {
		/* loop: */
		0xe4d03001,	/* ldrb r3, [r0], #1 */
		0xe0022003,	/* and r2, r2, r3    */
		0xe2511001,	/* subs r1, r1, #1   */
		0x1afffffb,	/* bne loop          */
		/* end: */
		0xeafffffe	/* b end             */
	};
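
	/* In other words: the stub ANDs every byte of the region into r2,
	 * which starts out as 0xff; r2 is read back below as *blank, and it
	 * is still 0xff only if every byte was 0xff, i.e. the block is
	 * erased NOR flash.
	 */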
	/* make sure we have a working area */
	retval = target_alloc_working_area(target,
			sizeof(check_code), &check_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(check_code); i++) {
		retval = target_write_u32(target,
				check_algorithm->address
						+ i * sizeof(uint32_t),
				check_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARM_COMMON_MAGIC;
	armv4_5_info.core_mode = ARM_MODE_SVC;
	armv4_5_info.core_state = ARM_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, address);

	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[2].value, 0, 32, 0xff);

	retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
			check_algorithm->address,
			check_algorithm->address + sizeof(check_code) - 4,
			10000, &armv4_5_info);
	if (retval != ERROR_OK) {
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		destroy_reg_param(&reg_params[2]);
		target_free_working_area(target, check_algorithm);
		return retval;
	}

	*blank = buf_get_u32(reg_params[2].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	destroy_reg_param(&reg_params[2]);

	target_free_working_area(target, check_algorithm);

	return ERROR_OK;
}
static int arm_full_context(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);
	unsigned num_regs = armv4_5->core_cache->num_regs;
	struct reg *reg = armv4_5->core_cache->reg_list;
	int retval = ERROR_OK;

	for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
		if (reg->valid)
			continue;
		retval = armv4_5_get_core_reg(reg);
	}
	return retval;
}
static int arm_default_mrc(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t *value)
{
	LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
	return ERROR_FAIL;
}

static int arm_default_mcr(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t value)
{
	LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
	return ERROR_FAIL;
}
int arm_init_arch_info(struct target *target, struct arm *armv4_5)
{
	target->arch_info = armv4_5;
	armv4_5->target = target;

	armv4_5->common_magic = ARM_COMMON_MAGIC;

	/* core_type may be overridden by subtype logic */
	if (armv4_5->core_type != ARM_MODE_THREAD) {
		armv4_5->core_type = ARM_MODE_ANY;
		arm_set_cpsr(armv4_5, ARM_MODE_USR);
	}

	/* default full_context() has no core-specific optimizations */
	if (!armv4_5->full_context && armv4_5->read_core_reg)
		armv4_5->full_context = arm_full_context;

	if (!armv4_5->mrc)
		armv4_5->mrc = arm_default_mrc;
	if (!armv4_5->mcr)
		armv4_5->mcr = arm_default_mcr;

	return ERROR_OK;
}