/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Oyvind Harboe                                   *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/
#include "breakpoints.h"
#include "arm_disassembler.h"
#include <helper/binarybuffer.h>
#include "algorithm.h"
/* offsets into armv4_5 core register cache */
enum {
	ARMV4_5_CPSR = 31,
	ARMV4_5_SPSR_FIQ = 32,
	ARMV4_5_SPSR_IRQ = 33,
	ARMV4_5_SPSR_SVC = 34,
	ARMV4_5_SPSR_ABT = 35,
	ARMV4_5_SPSR_UND = 36,
	ARMV4_5_SPSR_MON = 39,
};
static const uint8_t arm_usr_indices[17] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
};

static const uint8_t arm_fiq_indices[8] = {
	16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
};

static const uint8_t arm_irq_indices[3] = {
	23, 24, ARMV4_5_SPSR_IRQ,
};

static const uint8_t arm_svc_indices[3] = {
	25, 26, ARMV4_5_SPSR_SVC,
};

static const uint8_t arm_abt_indices[3] = {
	27, 28, ARMV4_5_SPSR_ABT,
};

static const uint8_t arm_und_indices[3] = {
	29, 30, ARMV4_5_SPSR_UND,
};
static const uint8_t arm_mon_indices[3] = {
	37, 38, ARMV4_5_SPSR_MON,
};
static const struct {
	const char *name;
	unsigned short psr;
	/* For user and system modes, these list indices for all registers.
	 * Otherwise they're just indices for the shadow registers and SPSR.
	 */
	unsigned short n_indices;
	const uint8_t *indices;
} arm_mode_data[] = {
	/* Seven modes are standard from ARM7 on. "System" and "User" share
	 * the same registers; other modes shadow from 3 to 8 registers.
	 */
	{
		.name = "User",
		.psr = ARM_MODE_USR,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	{
		.name = "FIQ",
		.psr = ARM_MODE_FIQ,
		.n_indices = ARRAY_SIZE(arm_fiq_indices),
		.indices = arm_fiq_indices,
	},
	{
		.name = "Supervisor",
		.psr = ARM_MODE_SVC,
		.n_indices = ARRAY_SIZE(arm_svc_indices),
		.indices = arm_svc_indices,
	},
	{
		.name = "Abort",
		.psr = ARM_MODE_ABT,
		.n_indices = ARRAY_SIZE(arm_abt_indices),
		.indices = arm_abt_indices,
	},
	{
		.name = "IRQ",
		.psr = ARM_MODE_IRQ,
		.n_indices = ARRAY_SIZE(arm_irq_indices),
		.indices = arm_irq_indices,
	},
	{
		.name = "Undefined instruction",
		.psr = ARM_MODE_UND,
		.n_indices = ARRAY_SIZE(arm_und_indices),
		.indices = arm_und_indices,
	},
	{
		.name = "System",
		.psr = ARM_MODE_SYS,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	/* TrustZone "Security Extensions" add a secure monitor mode.
	 * This is distinct from a "debug monitor" which can support
	 * non-halting debug, in conjunction with some debuggers.
	 */
	{
		.name = "Secure Monitor",
		.psr = ARM_MODE_MON,
		.n_indices = ARRAY_SIZE(arm_mon_indices),
		.indices = arm_mon_indices,
	},
};
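
/* For illustration: the FIQ entry above exposes n_indices == 8 registers
 * of its own -- cache slots 16..22 (r8_fiq..lr_fiq) plus ARMV4_5_SPSR_FIQ --
 * while the User and System entries list all 17 registers visible in
 * those modes.
 */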
/** Map PSR mode bits to the name of an ARM processor operating mode. */
const char *arm_mode_name(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return arm_mode_data[i].name;
	}

	LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
	return "UNRECOGNIZED";
}
/** Return true iff the parameter denotes a valid ARM processor mode. */
bool is_arm_mode(unsigned psr_mode)
{
	for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
		if (arm_mode_data[i].psr == psr_mode)
			return true;
	}

	return false;
}
/** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
int arm_mode_to_number(enum arm_mode mode)
{
	switch (mode) {
	case ARM_MODE_ANY:
		/* map MODE_ANY to user mode */
	case ARM_MODE_USR:	return 0;
	case ARM_MODE_FIQ:	return 1;
	case ARM_MODE_IRQ:	return 2;
	case ARM_MODE_SVC:	return 3;
	case ARM_MODE_ABT:	return 4;
	case ARM_MODE_UND:	return 5;
	case ARM_MODE_SYS:	return 6;
	case ARM_MODE_MON:	return 7;
	default:
		LOG_ERROR("invalid mode value encountered %d", mode);
		return -1;
	}
}
/** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
enum arm_mode armv4_5_number_to_mode(int number)
{
	switch (number) {
	case 0:	return ARM_MODE_USR;
	case 1:	return ARM_MODE_FIQ;
	case 2:	return ARM_MODE_IRQ;
	case 3:	return ARM_MODE_SVC;
	case 4:	return ARM_MODE_ABT;
	case 5:	return ARM_MODE_UND;
	case 6:	return ARM_MODE_SYS;
	case 7:	return ARM_MODE_MON;
	default:
		LOG_ERROR("mode index out of bounds %d", number);
		return ARM_MODE_ANY;
	}
}
const char *arm_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};
/* Templates for ARM core registers.
 *
 * NOTE:  offsets in this table are coupled to the arm_mode_data
 * table above, the armv4_5_core_reg_map array below, and also to
 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
 */
static const struct {
	/* The name is used for e.g. the "regs" command. */
	const char *name;

	/* The {cookie, mode} tuple uniquely identifies one register.
	 * In a given mode, cookies 0..15 map to registers R0..R15,
	 * with R13..R15 usually called SP, LR, PC.
	 *
	 * MODE_ANY is used as *input* to the mapping, and indicates
	 * various special cases (sigh) and errors.
	 *
	 * Cookie 16 is (currently) confusing, since it indicates
	 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
	 * (Exception modes have both CPSR and SPSR registers ...)
	 */
	unsigned cookie;
	enum arm_mode mode;
} arm_core_regs[] = {
	/* IMPORTANT:  we guarantee that the first eight cached registers
	 * correspond to r0..r7, and the fifteenth to PC, so that callers
	 * don't need to map them.
	 */
	{ .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
	{ .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
	{ .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
	{ .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
	{ .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
	{ .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
	{ .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
	{ .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },

	/* NOTE:  regs 8..12 might be shadowed by FIQ ... flagging
	 * them as MODE_ANY creates special cases.  (ANY means
	 * "not mapped" elsewhere; here it's "everything but FIQ".)
	 */
	{ .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
	{ .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
	{ .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
	{ .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
	{ .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },

	/* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
	{ .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
	{ .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },

	/* guaranteed to be at index 15 */
	{ .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },

	{ .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
	{ .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
	{ .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
	{ .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
	{ .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
	{ .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
	{ .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },

	{ .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
	{ .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },

	{ .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
	{ .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },

	{ .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
	{ .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },

	{ .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
	{ .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
	{ .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
	{ .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
	{ .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
	{ .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },

	{ .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
	{ .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
	{ .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
};
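
/* For illustration: each {cookie, mode} pair above is unique.  Cookie 13
 * (SP) appears once per mode that banks it -- "sp_usr", "sp_fiq", "sp_irq"
 * and so on -- while cookie 0 with ARM_MODE_ANY is the single "r0" shared
 * by every mode.
 */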
/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};
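
/* For example, arm_reg_current() below indexes this map: in FIQ (row 1),
 * register number 8 maps to cache index 16 ("r8_fiq"), while in User or
 * System mode the same number maps to index 8 ("r8").  Numbers 0..7 and
 * 15 map to themselves in every mode.
 */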
/**
 * Configures host-side ARM records to reflect the specified CPSR.
 * Later, code can use arm_reg_current() to map register numbers
 * according to how they are exposed by this mode.
 */
void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
{
	enum arm_mode mode = cpsr & 0x1f;
	int num;

	/* NOTE:  this may be called very early, before the register
	 * cache is set up.  We can't defend against many errors, in
	 * particular against CPSRs that aren't valid *here* ...
	 */
	if (arm->cpsr) {
		buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
		arm->cpsr->valid = 1;
		arm->cpsr->dirty = 0;
	}

	arm->core_mode = mode;

	/* mode_to_number() warned; set up a somewhat-sane mapping */
	num = arm_mode_to_number(mode);
	if (num < 0) {
		mode = ARM_MODE_USR;
		num = 0;
	}

	arm->map = &armv4_5_core_reg_map[num][0];
	arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
			? NULL
			: arm->core_cache->reg_list + arm->map[16];

	/* Older ARMs won't have the J bit */
	enum arm_state state;

	if (cpsr & (1 << 5)) {	/* T */
		if (cpsr & (1 << 24)) {	/* J */
			LOG_WARNING("ThumbEE -- incomplete support");
			state = ARM_STATE_THUMB_EE;
		} else
			state = ARM_STATE_THUMB;
	} else {
		if (cpsr & (1 << 24)) {	/* J */
			LOG_ERROR("Jazelle state handling is BROKEN!");
			state = ARM_STATE_JAZELLE;
		} else
			state = ARM_STATE_ARM;
	}
	arm->core_state = state;

	LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
			arm_mode_name(mode),
			arm_state_strings[arm->core_state]);
}
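
/* A minimal usage sketch: a core driver that has just read CPSR from a
 * halted target calls arm_set_cpsr(arm, cpsr) before touching banked
 * registers, so that arm->map, arm->spsr and arm->core_state all match
 * the mode the core actually stopped in.
 */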
/**
 * Returns handle to the register currently mapped to a given number.
 * Someone must have called arm_set_cpsr() before.
 *
 * \param arm This core's state and registers are used.
 * \param regnum From 0..15 corresponding to R0..R14 and PC.
 * Note that R0..R7 don't require mapping; you may access those
 * as the first eight entries in the register cache.  Likewise
 * R15 (PC) doesn't need mapping; you may also access it directly.
 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
 * CPSR (arm->cpsr) is also not mapped.
 */
struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	if (regnum > 16)
		return NULL;

	r = arm->core_cache->reg_list + arm->map[regnum];

	/* e.g. invalid CPSR said "secure monitor" mode on a core
	 * that doesn't support it...
	 */
	if (!r) {
		LOG_ERROR("Invalid CPSR mode");
		r = arm->core_cache->reg_list + regnum;
	}

	return r;
}
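
/* For example (hypothetical caller): r0..r7 and the PC can be read
 * directly from arm->core_cache->reg_list[0..7] and [15], but banked
 * registers must go through the mapping:
 *
 *	struct reg *sp = arm_reg_current(arm, 13);
 *	struct reg *lr = arm_reg_current(arm, 14);
 *
 * which resolve to sp_usr/lr_usr, sp_fiq/lr_fiq, etc. per the CPSR mode.
 */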
static const uint8_t arm_gdb_dummy_fp_value[12];

/**
 * Dummy FPA registers are required to support GDB on ARM.
 * Register packets require eight obsolete FPA register values.
 * Modern ARM cores use Vector Floating Point (VFP), if they
 * have any floating point support.  VFP is not FPA-compatible.
 */
struct reg arm_gdb_dummy_fp_reg =
{
	.name = "GDB dummy FPA register",
	.value = (uint8_t *) arm_gdb_dummy_fp_value,
	.valid = 1,
	.size = 96,
};

static const uint8_t arm_gdb_dummy_fps_value[4];

/**
 * Dummy FPA status registers are required to support GDB on ARM.
 * Register packets require an obsolete FPA status register.
 */
struct reg arm_gdb_dummy_fps_reg =
{
	.name = "GDB dummy FPA status register",
	.value = (uint8_t *) arm_gdb_dummy_fps_value,
	.valid = 1,
	.size = 32,
};

static void arm_gdb_dummy_init(void) __attribute__ ((constructor));

static void arm_gdb_dummy_init(void)
{
	register_init_dummy(&arm_gdb_dummy_fp_reg);
	register_init_dummy(&arm_gdb_dummy_fps_reg);
}
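
/* Background, for reference: GDB's ARM register packet lists r0..r15,
 * eight 96-bit FPA registers, FPS, and then CPSR -- 26 entries in all --
 * which is why armv4_5_get_gdb_reg_list() below fills slots 16..23 with
 * the dummy FPA register and slot 24 with the dummy status register.
 */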
static int armv4_5_get_core_reg(struct reg *reg)
{
	int retval;
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
	if (retval == ERROR_OK) {
		reg->valid = 1;
		reg->dirty = 0;
	}

	return retval;
}
static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;
	struct arm *armv4_5_target = target_to_armv4_5(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Except for CPSR, the "reg" command exposes a writeback model
	 * for the register cache.
	 */
	if (reg == armv4_5_target->cpsr) {
		arm_set_cpsr(armv4_5_target, value);

		/* Older cores need help to be in ARM mode during halt
		 * mode debug, so we clear the J and T bits if we flush.
		 * For newer cores (v6/v7a/v7r) we don't need that, but
		 * it won't hurt since CPSR is always flushed anyway.
		 */
		if (armv4_5_target->core_mode !=
				(enum arm_mode)(value & 0x1f)) {
			LOG_DEBUG("changing ARM core mode to '%s'",
					arm_mode_name(value & 0x1f));
			value &= ~((1 << 24) | (1 << 5));
			armv4_5_target->write_core_reg(target, reg,
					16, ARM_MODE_ANY, value);
		}
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		reg->valid = 1;
	}
	reg->dirty = 1;

	return ERROR_OK;
}
static const struct reg_arch_type arm_reg_type = {
	.get = armv4_5_get_core_reg,
	.set = armv4_5_set_core_reg,
};
struct reg_cache *armv4_5_build_reg_cache(struct target *target, struct arm *armv4_5_common)
{
	int num_regs = ARRAY_SIZE(arm_core_regs);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
	int i;

	if (!cache || !reg_list || !arch_info) {
		free(cache);
		free(reg_list);
		free(arch_info);
		return NULL;
	}

	cache->name = "ARM registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = 0;

	for (i = 0; i < num_regs; i++)
	{
		/* Skip registers this core doesn't expose */
		if (arm_core_regs[i].mode == ARM_MODE_MON
				&& armv4_5_common->core_type != ARM_MODE_MON)
			continue;

		/* REVISIT handle Cortex-M, which only shadows R13/SP */

		arch_info[i].num = arm_core_regs[i].cookie;
		arch_info[i].mode = arm_core_regs[i].mode;
		arch_info[i].target = target;
		arch_info[i].armv4_5_common = armv4_5_common;

		reg_list[i].name = (char *) arm_core_regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = &arch_info[i].value;
		reg_list[i].type = &arm_reg_type;
		reg_list[i].arch_info = &arch_info[i];

		cache->num_regs++;
	}

	armv4_5_common->cpsr = reg_list + ARMV4_5_CPSR;
	armv4_5_common->core_cache = cache;

	return cache;
}
int armv4_5_arch_state(struct target *target)
{
	struct arm *armv4_5 = target_to_armv4_5(target);

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARM target");
		return ERROR_FAIL;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
			arm_state_strings[armv4_5->core_state],
			Jim_Nvp_value2name_simple(nvp_target_debug_reason,
					target->debug_reason)->name,
			arm_mode_name(armv4_5->core_mode),
			buf_get_u32(armv4_5->cpsr->value, 0, 32),
			buf_get_u32(armv4_5->core_cache->reg_list[15].value,
					0, 32),
			armv4_5->is_semihosting ? ", semihosting" : "");

	return ERROR_OK;
}
#define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
		cache->reg_list[armv4_5_core_reg_map[mode][num]]
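
/* Example: ARMV4_5_CORE_REG_MODENUM(cache, 1, 8) selects cache index 16,
 * i.e. the FIQ shadow "r8_fiq"; here 'mode' is the linear row number used
 * by armv4_5_core_reg_map, not the raw PSR mode bits.
 */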
COMMAND_HANDLER(handle_armv4_5_reg_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_armv4_5(target);
	unsigned num_regs;
	struct reg *regs;

	if (!is_arm(armv4_5)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "error: target must be halted for register accesses");
		return ERROR_FAIL;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	if (!armv4_5->full_context) {
		command_print(CMD_CTX, "error: target doesn't support %s",
				CMD_NAME);
		return ERROR_FAIL;
	}

	num_regs = armv4_5->core_cache->num_regs;
	regs = armv4_5->core_cache->reg_list;

	for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
		const char *name;
		char *sep = "\n";
		char *shadow = "";

		/* label this bank of registers (or shadows) */
		switch (arm_mode_data[mode].psr) {
		case ARM_MODE_SYS:
			continue;
		case ARM_MODE_USR:
			name = "System and User";
			sep = "";
			break;
		case ARM_MODE_MON:
			if (armv4_5->core_type != ARM_MODE_MON)
				continue;
			/* FALLTHROUGH */
		default:
			name = arm_mode_data[mode].name;
			shadow = "shadow ";
			break;
		}
		command_print(CMD_CTX, "%s%s mode %sregisters",
				sep, name, shadow);

		/* display N rows of up to 4 registers each */
		for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
			char output[80];
			int output_len = 0;

			for (unsigned j = 0; j < 4; j++, i++) {
				uint32_t value;
				struct reg *reg = regs;

				if (i >= arm_mode_data[mode].n_indices)
					break;
				reg += arm_mode_data[mode].indices[i];

				/* REVISIT be smarter about faults... */
				if (!reg->valid)
					armv4_5->full_context(target);
				value = buf_get_u32(reg->value, 0, 32);
				output_len += snprintf(output + output_len,
						sizeof(output) - output_len,
						"%8s: %8.8" PRIx32 " ",
						reg->name, value);
			}
			command_print(CMD_CTX, "%s", output);
		}
	}

	return ERROR_OK;
}
COMMAND_HANDLER(handle_armv4_5_core_state_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_armv4_5(target);

	if (!is_arm(armv4_5)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (CMD_ARGC > 0) {
		if (strcmp(CMD_ARGV[0], "arm") == 0)
			armv4_5->core_state = ARM_STATE_ARM;
		if (strcmp(CMD_ARGV[0], "thumb") == 0)
			armv4_5->core_state = ARM_STATE_THUMB;
	}

	command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);

	return ERROR_OK;
}
COMMAND_HANDLER(handle_armv4_5_disassemble_command)
{
	int retval = ERROR_OK;
	struct target *target = get_current_target(CMD_CTX);
	struct arm *arm = target ? target_to_arm(target) : NULL;
	uint32_t address;
	int count = 1;
	int thumb = 0;

	if (!is_arm(arm)) {
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	switch (CMD_ARGC) {
	case 3:
		if (strcmp(CMD_ARGV[2], "thumb") != 0)
			goto usage;
		thumb = 1;
		/* FALL THROUGH */
	case 2:
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
		/* FALL THROUGH */
	case 1:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
		if (address & 0x01) {
			if (!thumb)
				command_print(CMD_CTX, "Disassemble as Thumb");
			thumb = 1;
			address &= ~1;
		}
		break;
	default:
usage:
		command_print(CMD_CTX,
			"usage: arm disassemble <address> [<count> ['thumb']]");
		count = 0;
		retval = ERROR_FAIL;
	}

	while (count-- > 0) {
		struct arm_instruction cur_instruction;

		if (thumb) {
			/* Always use Thumb2 disassembly for best handling
			 * of 32-bit BL/BLX, and to work with newer cores
			 * (some ARMv6, all ARMv7) that use Thumb2.
			 */
			retval = thumb2_opcode(target, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		} else {
			uint32_t opcode;

			retval = target_read_u32(target, address, &opcode);
			if (retval != ERROR_OK)
				break;
			retval = arm_evaluate_opcode(opcode, address,
					&cur_instruction);
			if (retval != ERROR_OK)
				break;
		}
		command_print(CMD_CTX, "%s", cur_instruction.text);
		address += cur_instruction.instruction_size;
	}

	return retval;
}
static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct command_context *context;
	struct target *target;
	struct arm *arm;
	int retval;

	context = Jim_GetAssocData(interp, "context");
	if (context == NULL) {
		LOG_ERROR("%s: no command context", __func__);
		return JIM_ERR;
	}
	target = get_current_target(context);
	if (target == NULL) {
		LOG_ERROR("%s: no current target", __func__);
		return JIM_ERR;
	}
	if (!target_was_examined(target)) {
		LOG_ERROR("%s: not yet examined", target_name(target));
		return JIM_ERR;
	}
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		LOG_ERROR("%s: not an ARM", target_name(target));
		return JIM_ERR;
	}

	if ((argc < 6) || (argc > 7)) {
		/* FIXME use the command name to verify # params... */
		LOG_ERROR("%s: wrong number of arguments", __func__);
		return JIM_ERR;
	}

	int cpnum;
	uint32_t op1;
	uint32_t op2;
	uint32_t CRn;
	uint32_t CRm;
	uint32_t value;
	long l;
	/* NOTE:  parameter sequence matches ARM instruction set usage:
	 *	MCR	pNUM, op1, rX, CRn, CRm, op2	; write CP from rX
	 *	MRC	pNUM, op1, rX, CRn, CRm, op2	; read CP into rX
	 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
	 */
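
	/* Illustrative only: reading the CP15 Main ID register, written in
	 * assembler as "MRC p15, 0, rX, c0, c0, 0", becomes
	 * "arm mrc 15 0 0 0 0" at the Tcl prompt, with the result returned
	 * as the command's value instead of landing in rX.
	 */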
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
				"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;

	/* FIXME don't assume "mrc" vs "mcr" from the number of params;
	 * that could easily be a typo!  Check both...
	 *
	 * FIXME change the call syntax here ... simplest to just pass
	 * the MRC() or MCR() instruction to be executed.  That will also
	 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
	 * if that's ever needed.
	 */
	if (argc == 7) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK) {
			return retval;
		}
		value = l;

		/* NOTE: parameters reordered! */
		// ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		// ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}
static const struct command_registration arm_exec_command_handlers[] = {
	{
		.name = "reg",
		.handler = &handle_armv4_5_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display ARM core registers",
	},
	{
		.name = "core_state",
		.handler = &handle_armv4_5_core_state_command,
		.mode = COMMAND_EXEC,
		.usage = "<arm | thumb>",
		.help = "display/change ARM core state",
	},
	{
		.name = "disassemble",
		.handler = &handle_armv4_5_disassemble_command,
		.mode = COMMAND_EXEC,
		.usage = "<address> [<count> ['thumb']]",
		.help = "disassemble instructions",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = &jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = &jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration arm_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM command group",
		.chain = arm_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
int armv4_5_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size)
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	int i;

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	*reg_list_size = 26;
	*reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));

	for (i = 0; i < 16; i++)
		(*reg_list)[i] = arm_reg_current(armv4_5, i);

	for (i = 16; i < 24; i++)
		(*reg_list)[i] = &arm_gdb_dummy_fp_reg;

	(*reg_list)[24] = &arm_gdb_dummy_fps_reg;
	(*reg_list)[25] = armv4_5->cpsr;

	return ERROR_OK;
}
/* wait for execution to complete and check exit point */
static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	int retval;
	struct arm *armv4_5 = target_to_armv4_5(target);

	if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
		if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
			return retval;
		return ERROR_TARGET_TIMEOUT;
	}

	/* fast exit: ARMv5+ code can use BKPT */
	if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
				0, 32) != exit_point)
	{
		LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
			buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
		return ERROR_TARGET_TIMEOUT;
	}

	return ERROR_OK;
}
int armv4_5_run_algorithm_inner(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info,
		int (*run_it)(struct target *target, uint32_t exit_point,
				int timeout_ms, void *arch_info))
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	struct armv4_5_algorithm *armv4_5_algorithm_info = arch_info;
	enum arm_state core_state = armv4_5->core_state;
	uint32_t context[17];
	uint32_t cpsr;
	int exit_breakpoint_size = 0;
	int i;
	int retval = ERROR_OK;

	LOG_DEBUG("Running algorithm");

	if (armv4_5_algorithm_info->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("current target isn't an ARMV4/5 target");
		return ERROR_TARGET_INVALID;
	}

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	/* armv5 and later can terminate with BKPT instruction; less overhead */
	if (!exit_point && armv4_5->is_armv4)
	{
		LOG_ERROR("ARMv4 target needs HW breakpoint location");
		return ERROR_FAIL;
	}

	/* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
	 * they'll be restored later.
	 */
	for (i = 0; i <= 16; i++)
	{
		struct reg *r;

		r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5_algorithm_info->core_mode, i);
		if (!r->valid)
			armv4_5->read_core_reg(target, r, i,
					armv4_5_algorithm_info->core_mode);
		context[i] = buf_get_u32(r->value, 0, 32);
	}
	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);

	for (i = 0; i < num_mem_params; i++)
	{
		if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
			return retval;
	}

	for (i = 0; i < num_reg_params; i++)
	{
		struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
		if (!reg)
		{
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if (reg->size != reg_params[i].size)
		{
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
			return retval;
	}

	armv4_5->core_state = armv4_5_algorithm_info->core_state;
	if (armv4_5->core_state == ARM_STATE_ARM)
		exit_breakpoint_size = 4;
	else if (armv4_5->core_state == ARM_STATE_THUMB)
		exit_breakpoint_size = 2;
	else
	{
		LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
		return ERROR_INVALID_ARGUMENTS;
	}

	if (armv4_5_algorithm_info->core_mode != ARM_MODE_ANY)
	{
		LOG_DEBUG("setting core_mode: 0x%2.2x",
				armv4_5_algorithm_info->core_mode);
		buf_set_u32(armv4_5->cpsr->value, 0, 5,
				armv4_5_algorithm_info->core_mode);
		armv4_5->cpsr->dirty = 1;
		armv4_5->cpsr->valid = 1;
	}

	/* terminate using a hardware or (ARMv5+) software breakpoint */
	if (exit_point && (retval = breakpoint_add(target, exit_point,
				exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
	{
		LOG_ERROR("can't add HW breakpoint to terminate algorithm");
		return ERROR_TARGET_FAILURE;
	}

	if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
		return retval;

	retval = run_it(target, exit_point, timeout_ms, arch_info);

	if (exit_point)
		breakpoint_remove(target, exit_point);

	if (retval != ERROR_OK)
		return retval;

	for (i = 0; i < num_mem_params; i++)
	{
		if (mem_params[i].direction != PARAM_OUT)
		{
			int retvaltemp;

			if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
				retval = retvaltemp;
		}
	}

	for (i = 0; i < num_reg_params; i++)
	{
		if (reg_params[i].direction != PARAM_OUT)
		{
			struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
			if (!reg)
			{
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			if (reg->size != reg_params[i].size)
			{
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
		}
	}

	/* restore everything we saved before (17 or 18 registers) */
	for (i = 0; i <= 16; i++)
	{
		uint32_t regvalue;

		regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
		if (regvalue != context[i])
		{
			LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).name, context[i]);
			buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32, context[i]);
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid = 1;
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).dirty = 1;
		}
	}

	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	armv4_5->core_state = core_state;

	return retval;
}
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
{
	return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
}
/**
 * Runs ARM code in the target to calculate a CRC32 checksum.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_checksum_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *checksum)
{
	struct working_area *crc_algorithm;
	struct armv4_5_algorithm armv4_5_info;
	struct reg_param reg_params[2];
	int retval;
	uint32_t i;

	static const uint32_t arm_crc_code[] = {
		0xE1A02000,		/* mov		r2, r0 */
		0xE3E00000,		/* mov		r0, #0xffffffff */
		0xE1A03001,		/* mov		r3, r1 */
		0xE3A04000,		/* mov		r4, #0 */
		0xEA00000B,		/* b		ncomp */
					/* nbyte: */
		0xE7D21004,		/* ldrb		r1, [r2, r4] */
		0xE59F7030,		/* ldr		r7, CRC32XOR */
		0xE0200C01,		/* eor		r0, r0, r1, asl 24 */
		0xE3A05000,		/* mov		r5, #0 */
					/* loop: */
		0xE3500000,		/* cmp		r0, #0 */
		0xE1A06080,		/* mov		r6, r0, asl #1 */
		0xE2855001,		/* add		r5, r5, #1 */
		0xE1A00006,		/* mov		r0, r6 */
		0xB0260007,		/* eorlt	r0, r6, r7 */
		0xE3550008,		/* cmp		r5, #8 */
		0x1AFFFFF8,		/* bne		loop */
		0xE2844001,		/* add		r4, r4, #1 */
					/* ncomp: */
		0xE1540003,		/* cmp		r4, r3 */
		0x1AFFFFF1,		/* bne		nbyte */
					/* end: */
		0xEAFFFFFE,		/* b		end */
					/* CRC32XOR: */
		0x04C11DB7		/* .word 0x04C11DB7 */
	};
= target_alloc_working_area(target
,
1263 sizeof(arm_crc_code
), &crc_algorithm
);
1264 if (retval
!= ERROR_OK
)
1267 /* convert code into a buffer in target endianness */
1268 for (i
= 0; i
< ARRAY_SIZE(arm_crc_code
); i
++) {
1269 retval
= target_write_u32(target
,
1270 crc_algorithm
->address
+ i
* sizeof(uint32_t),
1272 if (retval
!= ERROR_OK
)
1276 armv4_5_info
.common_magic
= ARMV4_5_COMMON_MAGIC
;
1277 armv4_5_info
.core_mode
= ARM_MODE_SVC
;
1278 armv4_5_info
.core_state
= ARM_STATE_ARM
;
1280 init_reg_param(®_params
[0], "r0", 32, PARAM_IN_OUT
);
1281 init_reg_param(®_params
[1], "r1", 32, PARAM_OUT
);
1283 buf_set_u32(reg_params
[0].value
, 0, 32, address
);
1284 buf_set_u32(reg_params
[1].value
, 0, 32, count
);
1286 /* 20 second timeout/megabyte */
1287 int timeout
= 20000 * (1 + (count
/ (1024 * 1024)));
1289 retval
= target_run_algorithm(target
, 0, NULL
, 2, reg_params
,
1290 crc_algorithm
->address
,
1291 crc_algorithm
->address
+ sizeof(arm_crc_code
) - 8,
1292 timeout
, &armv4_5_info
);
1293 if (retval
!= ERROR_OK
) {
1294 LOG_ERROR("error executing ARM crc algorithm");
1295 destroy_reg_param(®_params
[0]);
1296 destroy_reg_param(®_params
[1]);
1297 target_free_working_area(target
, crc_algorithm
);
1301 *checksum
= buf_get_u32(reg_params
[0].value
, 0, 32);
1303 destroy_reg_param(®_params
[0]);
1304 destroy_reg_param(®_params
[1]);
1306 target_free_working_area(target
, crc_algorithm
);
/**
 * Runs ARM code in the target to check whether a memory block holds
 * all ones.  NOR flash which has been erased, and thus may be written,
 * holds all ones.
 *
 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
 */
int arm_blank_check_memory(struct target *target,
		uint32_t address, uint32_t count, uint32_t *blank)
{
	struct working_area *check_algorithm;
	struct reg_param reg_params[3];
	struct armv4_5_algorithm armv4_5_info;
	int retval;
	uint32_t i;

	static const uint32_t check_code[] = {
		/* loop: */
		0xe4d03001,		/* ldrb r3, [r0], #1 */
		0xe0022003,		/* and r2, r2, r3    */
		0xe2511001,		/* subs r1, r1, #1   */
		0x1afffffb,		/* bne loop          */
		/* end: */
		0xeafffffe		/* b end             */
	};
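
	/* In other words: the stub ANDs together every byte in
	 * [r0, r0 + r1).  Since r2 is seeded with 0xff below, the value
	 * read back through reg_params[2] is 0xff exactly when the whole
	 * region is blank (all ones).
	 */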
	/* make sure we have a working area */
	retval = target_alloc_working_area(target,
			sizeof(check_code), &check_algorithm);
	if (retval != ERROR_OK)
		return retval;

	/* convert code into a buffer in target endianness */
	for (i = 0; i < ARRAY_SIZE(check_code); i++) {
		retval = target_write_u32(target,
				check_algorithm->address
					+ i * sizeof(uint32_t),
				check_code[i]);
		if (retval != ERROR_OK)
			return retval;
	}

	armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
	armv4_5_info.core_mode = ARM_MODE_SVC;
	armv4_5_info.core_state = ARM_STATE_ARM;

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, address);

	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
	buf_set_u32(reg_params[1].value, 0, 32, count);

	init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[2].value, 0, 32, 0xff);

	retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
			check_algorithm->address,
			check_algorithm->address + sizeof(check_code) - 4,
			10000, &armv4_5_info);
	if (retval != ERROR_OK) {
		destroy_reg_param(&reg_params[0]);
		destroy_reg_param(&reg_params[1]);
		destroy_reg_param(&reg_params[2]);
		target_free_working_area(target, check_algorithm);
		return retval;
	}

	*blank = buf_get_u32(reg_params[2].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	destroy_reg_param(&reg_params[2]);

	target_free_working_area(target, check_algorithm);

	return ERROR_OK;
}
static int arm_full_context(struct target *target)
{
	struct arm *armv4_5 = target_to_armv4_5(target);
	unsigned num_regs = armv4_5->core_cache->num_regs;
	struct reg *reg = armv4_5->core_cache->reg_list;
	int retval = ERROR_OK;

	for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
		if (reg->valid)
			continue;
		retval = armv4_5_get_core_reg(reg);
	}

	return retval;
}
static int arm_default_mrc(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t *value)
{
	LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
	return ERROR_FAIL;
}

static int arm_default_mcr(struct target *target, int cpnum,
		uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm,
		uint32_t value)
{
	LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
	return ERROR_FAIL;
}
int armv4_5_init_arch_info(struct target *target, struct arm *armv4_5)
{
	target->arch_info = armv4_5;
	armv4_5->target = target;

	armv4_5->common_magic = ARMV4_5_COMMON_MAGIC;
	arm_set_cpsr(armv4_5, ARM_MODE_USR);

	/* core_type may be overridden by subtype logic */
	armv4_5->core_type = ARM_MODE_ANY;

	/* default full_context() has no core-specific optimizations */
	if (!armv4_5->full_context && armv4_5->read_core_reg)
		armv4_5->full_context = arm_full_context;

	if (!armv4_5->mrc)
		armv4_5->mrc = arm_default_mrc;
	if (!armv4_5->mcr)
		armv4_5->mcr = arm_default_mcr;

	return ERROR_OK;
}
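
/* Typical use, sketched under assumptions about the core driver: an
 * ARM7/ARM9-style target fills in read_core_reg, write_core_reg and its
 * other hooks, then calls armv4_5_init_arch_info(target, arm) during setup
 * and armv4_5_build_reg_cache(target, arm) when its register cache is
 * created, after which the generic register, GDB and algorithm support in
 * this file can drive the core.
 */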