target: Add some info messages about examination process.
src/target/target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
52 /* default halt wait timeout (ms) */
53 #define DEFAULT_HALT_TIMEOUT 5000
55 static int target_read_buffer_default(struct target *target, target_addr_t address,
56 uint32_t count, uint8_t *buffer);
57 static int target_write_buffer_default(struct target *target, target_addr_t address,
58 uint32_t count, const uint8_t *buffer);
59 static int target_array2mem(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj * const *argv);
61 static int target_mem2array(Jim_Interp *interp, struct target *target,
62 int argc, Jim_Obj * const *argv);
63 static int target_register_user_commands(struct command_context *cmd_ctx);
64 static int target_get_gdb_fileio_info_default(struct target *target,
65 struct gdb_fileio_info *fileio_info);
66 static int target_gdb_fileio_end_default(struct target *target, int retcode,
67 int fileio_errno, bool ctrl_c);
69 static struct target_type *target_types[] = {
70 &arm7tdmi_target,
71 &arm9tdmi_target,
72 &arm920t_target,
73 &arm720t_target,
74 &arm966e_target,
75 &arm946e_target,
76 &arm926ejs_target,
77 &fa526_target,
78 &feroceon_target,
79 &dragonite_target,
80 &xscale_target,
81 &xtensa_chip_target,
82 &cortexm_target,
83 &cortexa_target,
84 &cortexr4_target,
85 &arm11_target,
86 &ls1_sap_target,
87 &mips_m4k_target,
88 &avr_target,
89 &dsp563xx_target,
90 &dsp5680xx_target,
91 &testee_target,
92 &avr32_ap7k_target,
93 &hla_target,
94 &esp32_target,
95 &esp32s2_target,
96 &esp32s3_target,
97 &or1k_target,
98 &quark_x10xx_target,
99 &quark_d20xx_target,
100 &stm8_target,
101 &riscv_target,
102 &mem_ap_target,
103 &esirisc_target,
104 &arcv2_target,
105 &aarch64_target,
106 &armv8r_target,
107 &mips_mips64_target,
108 NULL,
111 struct target *all_targets;
112 static struct target_event_callback *target_event_callbacks;
113 static struct target_timer_callback *target_timer_callbacks;
114 static int64_t target_timer_next_event_value;
115 static LIST_HEAD(target_reset_callback_list);
116 static LIST_HEAD(target_trace_callback_list);
117 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
118 static LIST_HEAD(empty_smp_targets);
120 enum nvp_assert {
121 NVP_DEASSERT,
122 NVP_ASSERT,
125 static const struct nvp nvp_assert[] = {
126 { .name = "assert", NVP_ASSERT },
127 { .name = "deassert", NVP_DEASSERT },
128 { .name = "T", NVP_ASSERT },
129 { .name = "F", NVP_DEASSERT },
130 { .name = "t", NVP_ASSERT },
131 { .name = "f", NVP_DEASSERT },
132 { .name = NULL, .value = -1 }
135 static const struct nvp nvp_error_target[] = {
136 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
137 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
138 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
139 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
140 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
141 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
142 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
143 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
144 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
145 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
146 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
147 { .value = -1, .name = NULL }
150 static const char *target_strerror_safe(int err)
152 const struct nvp *n;
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
161 static const struct jim_nvp nvp_target_event[] = {
163 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
164 { .value = TARGET_EVENT_HALTED, .name = "halted" },
165 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
166 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
167 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
168 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
169 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
171 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
172 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
174 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
175 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
176 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
177 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
178 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
179 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
180 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
181 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
183 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
184 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
185 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
187 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
188 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
190 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
191 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
193 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
194 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
196 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
197 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
199 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
201 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
202 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
203 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
204 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
205 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
206 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
207 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
208 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },
210 { .name = NULL, .value = -1 }
213 static const struct nvp nvp_target_state[] = {
214 { .name = "unknown", .value = TARGET_UNKNOWN },
215 { .name = "running", .value = TARGET_RUNNING },
216 { .name = "halted", .value = TARGET_HALTED },
217 { .name = "reset", .value = TARGET_RESET },
218 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
219 { .name = NULL, .value = -1 },
222 static const struct nvp nvp_target_debug_reason[] = {
223 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
224 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
225 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
226 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
227 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
228 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
229 { .name = "program-exit", .value = DBG_REASON_EXIT },
230 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
231 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
232 { .name = NULL, .value = -1 },
235 static const struct jim_nvp nvp_target_endian[] = {
236 { .name = "big", .value = TARGET_BIG_ENDIAN },
237 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
238 { .name = "be", .value = TARGET_BIG_ENDIAN },
239 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
240 { .name = NULL, .value = -1 },
243 static const struct nvp nvp_reset_modes[] = {
244 { .name = "unknown", .value = RESET_UNKNOWN },
245 { .name = "run", .value = RESET_RUN },
246 { .name = "halt", .value = RESET_HALT },
247 { .name = "init", .value = RESET_INIT },
248 { .name = NULL, .value = -1 },
251 const char *debug_reason_name(struct target *t)
253 const char *cp;
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
261 return cp;
264 const char *target_state_name(struct target *t)
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
276 return cp;
279 const char *target_event_name(enum target_event event)
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
287 return cp;
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
298 return cp;
301 static void append_to_list_all_targets(struct target *target)
303 struct target **t = &all_targets;
305 while (*t)
306 t = &((*t)->next);
307 *t = target;
310 /* read a uint64_t from a buffer in target memory endianness */
311 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
313 if (target->endianness == TARGET_LITTLE_ENDIAN)
314 return le_to_h_u64(buffer);
315 else
316 return be_to_h_u64(buffer);
319 /* read a uint32_t from a buffer in target memory endianness */
320 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
322 if (target->endianness == TARGET_LITTLE_ENDIAN)
323 return le_to_h_u32(buffer);
324 else
325 return be_to_h_u32(buffer);
328 /* read a uint24_t from a buffer in target memory endianness */
329 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
331 if (target->endianness == TARGET_LITTLE_ENDIAN)
332 return le_to_h_u24(buffer);
333 else
334 return be_to_h_u24(buffer);
337 /* read a uint16_t from a buffer in target memory endianness */
338 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
340 if (target->endianness == TARGET_LITTLE_ENDIAN)
341 return le_to_h_u16(buffer);
342 else
343 return be_to_h_u16(buffer);
346 /* write a uint64_t to a buffer in target memory endianness */
347 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
349 if (target->endianness == TARGET_LITTLE_ENDIAN)
350 h_u64_to_le(buffer, value);
351 else
352 h_u64_to_be(buffer, value);
355 /* write a uint32_t to a buffer in target memory endianness */
356 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
358 if (target->endianness == TARGET_LITTLE_ENDIAN)
359 h_u32_to_le(buffer, value);
360 else
361 h_u32_to_be(buffer, value);
364 /* write a uint24_t to a buffer in target memory endianness */
365 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
367 if (target->endianness == TARGET_LITTLE_ENDIAN)
368 h_u24_to_le(buffer, value);
369 else
370 h_u24_to_be(buffer, value);
373 /* write a uint16_t to a buffer in target memory endianness */
374 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
376 if (target->endianness == TARGET_LITTLE_ENDIAN)
377 h_u16_to_le(buffer, value);
378 else
379 h_u16_to_be(buffer, value);
382 /* write a uint8_t to a buffer in target memory endianness */
383 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
385 *buffer = value;
388 /* write a uint64_t array to a buffer in target memory endianness */
389 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
391 uint32_t i;
392 for (i = 0; i < count; i++)
393 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
396 /* write a uint32_t array to a buffer in target memory endianness */
397 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
399 uint32_t i;
400 for (i = 0; i < count; i++)
401 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
404 /* write a uint16_t array to a buffer in target memory endianness */
405 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
407 uint32_t i;
408 for (i = 0; i < count; i++)
409 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
412 /* write a uint64_t array to a buffer in target memory endianness */
413 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
415 uint32_t i;
416 for (i = 0; i < count; i++)
417 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
420 /* write a uint32_t array to a buffer in target memory endianness */
421 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
423 uint32_t i;
424 for (i = 0; i < count; i++)
425 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
428 /* write a uint16_t array to a buffer in target memory endianness */
429 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
431 uint32_t i;
432 for (i = 0; i < count; i++)
433 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
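/*
 * Usage sketch (editorial addition, not part of the original file): how a
 * caller might use the endianness helpers above to pack values in target
 * byte order before a single bulk write. The helper name
 * write_two_words_example() is hypothetical; target_buffer_set_u32() and
 * target_write_buffer() are the real APIs declared in target.h.
 */
#if 0 /* example only */
static int write_two_words_example(struct target *target, target_addr_t addr,
	uint32_t first, uint32_t second)
{
	uint8_t buf[8];

	/* pack both words in the target's configured endianness */
	target_buffer_set_u32(target, buf, first);
	target_buffer_set_u32(target, buf + 4, second);

	/* one 8-byte write instead of two separate word writes */
	return target_write_buffer(target, addr, sizeof(buf), buf);
}
#endif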
436 /* return a pointer to a configured target; id is name or index in all_targets */
437 struct target *get_target(const char *id)
439 struct target *target;
441 /* try as a Tcl target name */
442 for (target = all_targets; target; target = target->next) {
443 if (!target_name(target))
444 continue;
445 if (strcmp(id, target_name(target)) == 0)
446 return target;
449 /* try as index */
450 unsigned int index, counter;
451 if (parse_uint(id, &index) != ERROR_OK)
452 return NULL;
454 for (target = all_targets, counter = index;
455 target && counter;
456 target = target->next, --counter)
459 return target;
462 struct target *get_current_target(struct command_context *cmd_ctx)
464 struct target *target = get_current_target_or_null(cmd_ctx);
466 if (!target) {
467 LOG_ERROR("BUG: current_target out of bounds");
468 exit(-1);
471 return target;
474 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
476 return cmd_ctx->current_target_override
477 ? cmd_ctx->current_target_override
478 : cmd_ctx->current_target;
481 int target_poll(struct target *target)
483 int retval;
485 /* We can't poll until after examine */
486 if (!target_was_examined(target)) {
487 /* Fail silently lest we pollute the log */
488 return ERROR_FAIL;
491 retval = target->type->poll(target);
492 if (retval != ERROR_OK)
493 return retval;
495 if (target->halt_issued) {
496 if (target->state == TARGET_HALTED)
497 target->halt_issued = false;
498 else {
499 int64_t t = timeval_ms() - target->halt_issued_time;
500 if (t > DEFAULT_HALT_TIMEOUT) {
501 target->halt_issued = false;
502 LOG_INFO("Halt timed out, wake up GDB.");
503 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
508 return ERROR_OK;
511 int target_halt(struct target *target)
513 int retval;
514 /* We can't poll until after examine */
515 if (!target_was_examined(target)) {
516 LOG_ERROR("Target not examined yet");
517 return ERROR_FAIL;
520 retval = target->type->halt(target);
521 if (retval != ERROR_OK)
522 return retval;
524 target->halt_issued = true;
525 target->halt_issued_time = timeval_ms();
527 return ERROR_OK;
531 * Make the target (re)start executing using its saved execution
532 * context (possibly with some modifications).
534 * @param target Which target should start executing.
535 * @param current True to use the target's saved program counter instead
536 * of the address parameter
537 * @param address Optionally used as the program counter.
538 * @param handle_breakpoints True iff breakpoints at the resumption PC
539 * should be skipped. (For example, maybe execution was stopped by
540 * such a breakpoint, in which case it would be counterproductive to
541 * let it re-trigger.)
542 * @param debug_execution False if all working areas allocated by OpenOCD
543 * should be released and/or restored to their original contents.
544 * (This would for example be true to run some downloaded "helper"
545 * algorithm code, which resides in one such working buffer and uses
546 * another for data storage.)
548 * @todo Resolve the ambiguity about what the "debug_execution" flag
549 * signifies. For example, Target implementations don't agree on how
550 * it relates to invalidation of the register cache, or to whether
551 * breakpoints and watchpoints should be enabled. (It would seem wrong
552 * to enable breakpoints when running downloaded "helper" algorithms
553 * (debug_execution true), since the breakpoints would be set to match
554 * target firmware being debugged, not the helper algorithm.... and
555 * enabling them could cause such helpers to malfunction (for example,
556 * by overwriting data with a breakpoint instruction). On the other
557 * hand the infrastructure for running such helpers might use this
558 * procedure but rely on hardware breakpoint to detect termination.)
560 int target_resume(struct target *target, int current, target_addr_t address,
561 int handle_breakpoints, int debug_execution)
563 int retval;
565 /* We can't poll until after examine */
566 if (!target_was_examined(target)) {
567 LOG_ERROR("Target not examined yet");
568 return ERROR_FAIL;
571 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
573 /* note that resume *must* be asynchronous. The CPU can halt before
574 * we poll. The CPU can even halt at the current PC as a result of
575 * a software breakpoint inserted by the application (possibly a bug).
578 * resume() triggers the event 'resumed'. The execution of TCL commands
579 * in the event handler causes the polling of targets. If the target has
580 * already halted for a breakpoint, polling will run the 'halted' event
581 * handler before the pending 'resumed' handler.
582 * Disable polling during resume() to guarantee the execution of handlers
583 * in the correct order.
585 bool save_poll_mask = jtag_poll_mask();
586 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
587 jtag_poll_unmask(save_poll_mask);
589 if (retval != ERROR_OK)
590 return retval;
592 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
594 return retval;
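/*
 * Usage sketch (editorial addition): resuming a halted target and polling
 * until it halts again, e.g. after hitting a breakpoint.
 * resume_and_wait_example() is a hypothetical helper; target_resume(),
 * target_poll(), timeval_ms() and alive_sleep() are the real APIs used in
 * this file.
 */
#if 0 /* example only */
static int resume_and_wait_example(struct target *target, int64_t timeout_ms)
{
	/* resume at the saved PC, skipping a breakpoint at that address */
	int retval = target_resume(target, 1, 0, 1, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t deadline = timeval_ms() + timeout_ms;
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > deadline)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);
	}
	return ERROR_OK;
}
#endif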
597 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
599 char buf[100];
600 int retval;
601 const struct nvp *n;
602 n = nvp_value2name(nvp_reset_modes, reset_mode);
603 if (!n->name) {
604 LOG_ERROR("invalid reset mode");
605 return ERROR_FAIL;
608 struct target *target;
609 for (target = all_targets; target; target = target->next)
610 target_call_reset_callbacks(target, reset_mode);
612 /* disable polling during reset to make reset event scripts
613 * more predictable, i.e. dr/irscan & pathmove in events will
614 * not have JTAG operations injected into the middle of a sequence.
616 bool save_poll_mask = jtag_poll_mask();
618 sprintf(buf, "ocd_process_reset %s", n->name);
619 retval = Jim_Eval(cmd->ctx->interp, buf);
621 jtag_poll_unmask(save_poll_mask);
623 if (retval != JIM_OK) {
624 Jim_MakeErrorMessage(cmd->ctx->interp);
625 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
626 return ERROR_FAIL;
629 /* We want any events to be processed before the prompt */
630 retval = target_call_timer_callbacks_now();
632 for (target = all_targets; target; target = target->next) {
633 target->type->check_reset(target);
634 target->running_alg = false;
637 return retval;
640 static int identity_virt2phys(struct target *target,
641 target_addr_t virtual, target_addr_t *physical)
643 *physical = virtual;
644 return ERROR_OK;
647 static int no_mmu(struct target *target, int *enabled)
649 *enabled = 0;
650 return ERROR_OK;
654 * Reset the @c examined flag for the given target.
655 * Pure paranoia -- targets are zeroed on allocation.
657 static inline void target_reset_examined(struct target *target)
659 target->examined = false;
662 static int default_examine(struct target *target)
664 target_set_examined(target);
665 return ERROR_OK;
668 /* no check by default */
669 static int default_check_reset(struct target *target)
671 return ERROR_OK;
674 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
675 * Keep in sync */
676 int target_examine_one(struct target *target)
678 LOG_TARGET_DEBUG(target, "Examination started");
680 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
682 int retval = target->type->examine(target);
683 if (retval != ERROR_OK) {
684 LOG_TARGET_ERROR(target, "Examination failed");
685 LOG_TARGET_DEBUG(target, "examine() returned error code %d", retval);
686 target_reset_examined(target);
687 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
688 return retval;
691 target_set_examined(target);
692 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
694 LOG_TARGET_INFO(target, "Examination succeeded");
695 return ERROR_OK;
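/*
 * Usage sketch (editorial addition): examining a target on demand, roughly
 * what the Tcl arp_examine path does. ensure_examined_example() is
 * hypothetical; target_was_examined(), target->defer_examine and
 * target_examine_one() are the real pieces used above.
 */
#if 0 /* example only */
static int ensure_examined_example(struct target *target)
{
	/* nothing to do if a previous examination already succeeded */
	if (target_was_examined(target))
		return ERROR_OK;

	/* honour "-defer-examine"; the user will trigger examination later */
	if (target->defer_examine)
		return ERROR_OK;

	/* fires examine-start and examine-end (or examine-fail) events */
	return target_examine_one(target);
}
#endif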
698 static int jtag_enable_callback(enum jtag_event event, void *priv)
700 struct target *target = priv;
702 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
703 return ERROR_OK;
705 jtag_unregister_event_callback(jtag_enable_callback, target);
707 return target_examine_one(target);
710 /* Targets that correctly implement init + examine, i.e.
711 * no communication with target during init:
713 * XScale
715 int target_examine(void)
717 int retval = ERROR_OK;
718 struct target *target;
720 for (target = all_targets; target; target = target->next) {
721 /* defer examination, but don't skip it */
722 if (!target->tap->enabled) {
723 jtag_register_event_callback(jtag_enable_callback,
724 target);
725 continue;
728 if (target->defer_examine)
729 continue;
731 int retval2 = target_examine_one(target);
732 if (retval2 != ERROR_OK) {
733 LOG_WARNING("target %s examination failed", target_name(target));
734 retval = retval2;
737 return retval;
740 const char *target_type_name(struct target *target)
742 return target->type->name;
745 static int target_soft_reset_halt(struct target *target)
747 if (!target_was_examined(target)) {
748 LOG_ERROR("Target not examined yet");
749 return ERROR_FAIL;
751 if (!target->type->soft_reset_halt) {
752 LOG_ERROR("Target %s does not support soft_reset_halt",
753 target_name(target));
754 return ERROR_FAIL;
756 return target->type->soft_reset_halt(target);
760 * Downloads a target-specific native code algorithm to the target,
761 * and executes it. Note that some targets may need to set up, enable,
762 * and tear down a breakpoint (hard or soft) to detect algorithm
763 * termination, while others may support lower overhead schemes where
764 * soft breakpoints embedded in the algorithm automatically terminate the
765 * algorithm.
767 * @param target used to run the algorithm
768 * @param num_mem_params
769 * @param mem_params
770 * @param num_reg_params
771 * @param reg_param
772 * @param entry_point
773 * @param exit_point
774 * @param timeout_ms
775 * @param arch_info target-specific description of the algorithm.
777 int target_run_algorithm(struct target *target,
778 int num_mem_params, struct mem_param *mem_params,
779 int num_reg_params, struct reg_param *reg_param,
780 target_addr_t entry_point, target_addr_t exit_point,
781 unsigned int timeout_ms, void *arch_info)
783 int retval = ERROR_FAIL;
785 if (!target_was_examined(target)) {
786 LOG_ERROR("Target not examined yet");
787 goto done;
789 if (!target->type->run_algorithm) {
790 LOG_ERROR("Target type '%s' does not support %s",
791 target_type_name(target), __func__);
792 goto done;
795 target->running_alg = true;
796 retval = target->type->run_algorithm(target,
797 num_mem_params, mem_params,
798 num_reg_params, reg_param,
799 entry_point, exit_point, timeout_ms, arch_info);
800 target->running_alg = false;
802 done:
803 return retval;
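/*
 * Usage sketch (editorial addition): running a small downloaded routine with
 * one register parameter. run_helper_example(), the register name "r0" and
 * the NULL arch_info are illustrative assumptions; real callers pass a
 * target-specific arch_info (e.g. a struct armv7m_algorithm on Cortex-M) and
 * the register names of their core. init_reg_param(), buf_set_u32() and
 * buf_get_u32() come from target/algorithm.h and helper/binarybuffer.h.
 */
#if 0 /* example only */
static int run_helper_example(struct target *target,
	target_addr_t entry, target_addr_t exit, uint32_t arg, uint32_t *result)
{
	struct reg_param reg_params[1];

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, arg);

	int retval = target_run_algorithm(target,
		0, NULL,		/* no memory params */
		1, reg_params,		/* one register param */
		entry, exit,
		1000,			/* timeout in ms */
		NULL);			/* arch_info: target specific */

	if (retval == ERROR_OK)
		*result = buf_get_u32(reg_params[0].value, 0, 32);

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif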
807 * Executes a target-specific native code algorithm and leaves it running.
809 * @param target used to run the algorithm
810 * @param num_mem_params
811 * @param mem_params
812 * @param num_reg_params
813 * @param reg_params
814 * @param entry_point
815 * @param exit_point
816 * @param arch_info target-specific description of the algorithm.
818 int target_start_algorithm(struct target *target,
819 int num_mem_params, struct mem_param *mem_params,
820 int num_reg_params, struct reg_param *reg_params,
821 target_addr_t entry_point, target_addr_t exit_point,
822 void *arch_info)
824 int retval = ERROR_FAIL;
826 if (!target_was_examined(target)) {
827 LOG_ERROR("Target not examined yet");
828 goto done;
830 if (!target->type->start_algorithm) {
831 LOG_ERROR("Target type '%s' does not support %s",
832 target_type_name(target), __func__);
833 goto done;
835 if (target->running_alg) {
836 LOG_ERROR("Target is already running an algorithm");
837 goto done;
840 target->running_alg = true;
841 retval = target->type->start_algorithm(target,
842 num_mem_params, mem_params,
843 num_reg_params, reg_params,
844 entry_point, exit_point, arch_info);
846 done:
847 return retval;
851 * Waits for an algorithm started with target_start_algorithm() to complete.
853 * @param target used to run the algorithm
854 * @param num_mem_params
855 * @param mem_params
856 * @param num_reg_params
857 * @param reg_params
858 * @param exit_point
859 * @param timeout_ms
860 * @param arch_info target-specific description of the algorithm.
862 int target_wait_algorithm(struct target *target,
863 int num_mem_params, struct mem_param *mem_params,
864 int num_reg_params, struct reg_param *reg_params,
865 target_addr_t exit_point, unsigned int timeout_ms,
866 void *arch_info)
868 int retval = ERROR_FAIL;
870 if (!target->type->wait_algorithm) {
871 LOG_ERROR("Target type '%s' does not support %s",
872 target_type_name(target), __func__);
873 goto done;
875 if (!target->running_alg) {
876 LOG_ERROR("Target is not running an algorithm");
877 goto done;
880 retval = target->type->wait_algorithm(target,
881 num_mem_params, mem_params,
882 num_reg_params, reg_params,
883 exit_point, timeout_ms, arch_info);
884 if (retval != ERROR_TARGET_TIMEOUT)
885 target->running_alg = false;
887 done:
888 return retval;
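/*
 * Usage sketch (editorial addition): the start/wait split lets the host do
 * other work while the algorithm runs on the target.
 * start_then_wait_example() is hypothetical; the parameters mirror
 * target_start_algorithm()/target_wait_algorithm() above, and the 10 s wait
 * timeout is an arbitrary illustrative value.
 */
#if 0 /* example only */
static int start_then_wait_example(struct target *target,
	target_addr_t entry, target_addr_t exit, void *arch_info)
{
	int retval = target_start_algorithm(target,
		0, NULL, 0, NULL,	/* no mem/reg params in this sketch */
		entry, exit, arch_info);
	if (retval != ERROR_OK)
		return retval;

	/* ... prepare the next data block, talk to the adapter, etc. ... */

	return target_wait_algorithm(target,
		0, NULL, 0, NULL,
		exit, 10000 /* ms */, arch_info);
}
#endif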
892 * Streams data to a circular buffer on target intended for consumption by code
893 * running asynchronously on target.
895 * This is intended for applications where target-specific native code runs
896 * on the target, receives data from the circular buffer, does something with
897 * it (most likely writing it to a flash memory), and advances the circular
898 * buffer pointer.
900 * This assumes that the helper algorithm has already been loaded to the target,
901 * but has not been started yet. Given memory and register parameters are passed
902 * to the algorithm.
904 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
905 * following format:
907 * [buffer_start + 0, buffer_start + 4):
908 * Write Pointer address (aka head). Written and updated by this
909 * routine when new data is written to the circular buffer.
910 * [buffer_start + 4, buffer_start + 8):
911 * Read Pointer address (aka tail). Updated by code running on the
912 * target after it consumes data.
913 * [buffer_start + 8, buffer_start + buffer_size):
914 * Circular buffer contents.
916 * See contrib/loaders/flash/stm32f1x.S for an example.
918 * @param target used to run the algorithm
919 * @param buffer address on the host where data to be sent is located
920 * @param count number of blocks to send
921 * @param block_size size in bytes of each block
922 * @param num_mem_params count of memory-based params to pass to algorithm
923 * @param mem_params memory-based params to pass to algorithm
924 * @param num_reg_params count of register-based params to pass to algorithm
925 * @param reg_params register-based params to pass to algorithm
926 * @param buffer_start address on the target of the circular buffer structure
927 * @param buffer_size size of the circular buffer structure
928 * @param entry_point address on the target to execute to start the algorithm
929 * @param exit_point address at which to set a breakpoint to catch the
930 * end of the algorithm; can be 0 if target triggers a breakpoint itself
931 * @param arch_info
934 int target_run_flash_async_algorithm(struct target *target,
935 const uint8_t *buffer, uint32_t count, int block_size,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 uint32_t buffer_start, uint32_t buffer_size,
939 uint32_t entry_point, uint32_t exit_point, void *arch_info)
941 int retval;
942 int timeout = 0;
944 const uint8_t *buffer_orig = buffer;
946 /* Set up working area. First word is write pointer, second word is read pointer,
947 * rest is fifo data area. */
948 uint32_t wp_addr = buffer_start;
949 uint32_t rp_addr = buffer_start + 4;
950 uint32_t fifo_start_addr = buffer_start + 8;
951 uint32_t fifo_end_addr = buffer_start + buffer_size;
953 uint32_t wp = fifo_start_addr;
954 uint32_t rp = fifo_start_addr;
956 /* validate block_size is 2^n */
957 assert(IS_PWR_OF_2(block_size));
959 retval = target_write_u32(target, wp_addr, wp);
960 if (retval != ERROR_OK)
961 return retval;
962 retval = target_write_u32(target, rp_addr, rp);
963 if (retval != ERROR_OK)
964 return retval;
966 /* Start up algorithm on target and let it idle while writing the first chunk */
967 retval = target_start_algorithm(target, num_mem_params, mem_params,
968 num_reg_params, reg_params,
969 entry_point,
970 exit_point,
971 arch_info);
973 if (retval != ERROR_OK) {
974 LOG_ERROR("error starting target flash write algorithm");
975 return retval;
978 while (count > 0) {
980 retval = target_read_u32(target, rp_addr, &rp);
981 if (retval != ERROR_OK) {
982 LOG_ERROR("failed to get read pointer");
983 break;
986 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
987 (size_t) (buffer - buffer_orig), count, wp, rp);
989 if (rp == 0) {
990 LOG_ERROR("flash write algorithm aborted by target");
991 retval = ERROR_FLASH_OPERATION_FAILED;
992 break;
995 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
996 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
997 break;
1000 /* Count the number of bytes available in the fifo without
1001 * crossing the wrap around. Make sure to not fill it completely,
1002 * because that would make wp == rp and that's the empty condition. */
1003 uint32_t thisrun_bytes;
1004 if (rp > wp)
1005 thisrun_bytes = rp - wp - block_size;
1006 else if (rp > fifo_start_addr)
1007 thisrun_bytes = fifo_end_addr - wp;
1008 else
1009 thisrun_bytes = fifo_end_addr - wp - block_size;
1011 if (thisrun_bytes == 0) {
1012 /* Throttle polling a bit if transfer is (much) faster than flash
1013 * programming. The exact delay shouldn't matter as long as it's
1014 * less than buffer size / flash speed. This is very unlikely to
1015 * run when using high latency connections such as USB. */
1016 alive_sleep(2);
1018 /* To stop an infinite loop on some targets, check and increment a timeout.
1019 * This issue was observed on a Stellaris using the new ICDI interface. */
1020 if (timeout++ >= 2500) {
1021 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1022 return ERROR_FLASH_OPERATION_FAILED;
1024 continue;
1027 /* reset our timeout */
1028 timeout = 0;
1030 /* Limit to the amount of data we actually want to write */
1031 if (thisrun_bytes > count * block_size)
1032 thisrun_bytes = count * block_size;
1034 /* Force end of large blocks to be word aligned */
1035 if (thisrun_bytes >= 16)
1036 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1038 /* Write data to fifo */
1039 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1040 if (retval != ERROR_OK)
1041 break;
1043 /* Update counters and wrap write pointer */
1044 buffer += thisrun_bytes;
1045 count -= thisrun_bytes / block_size;
1046 wp += thisrun_bytes;
1047 if (wp >= fifo_end_addr)
1048 wp = fifo_start_addr;
1050 /* Store updated write pointer to target */
1051 retval = target_write_u32(target, wp_addr, wp);
1052 if (retval != ERROR_OK)
1053 break;
1055 /* Avoid GDB timeouts */
1056 keep_alive();
1059 if (retval != ERROR_OK) {
1060 /* abort flash write algorithm on target */
1061 target_write_u32(target, wp_addr, 0);
1064 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1065 num_reg_params, reg_params,
1066 exit_point,
1067 10000,
1068 arch_info);
1070 if (retval2 != ERROR_OK) {
1071 LOG_ERROR("error waiting for target flash write algorithm");
1072 retval = retval2;
1075 if (retval == ERROR_OK) {
1076 /* check if algorithm set rp = 0 after fifo writer loop finished */
1077 retval = target_read_u32(target, rp_addr, &rp);
1078 if (retval == ERROR_OK && rp == 0) {
1079 LOG_ERROR("flash write algorithm aborted by target");
1080 retval = ERROR_FLASH_OPERATION_FAILED;
1084 return retval;
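/*
 * Layout sketch (editorial addition): the working-area block passed as
 * buffer_start/buffer_size has this shape, matching the doc comment above.
 * The struct name and the driver-style call are illustrative only; real
 * flash drivers (e.g. src/flash/nor/stm32f1x.c) allocate the area with
 * target_alloc_working_area() and pass target-specific reg_params/arch_info.
 */
#if 0 /* example only */
struct async_fifo_example {
	uint32_t wp;	/* [buffer_start + 0] head, written by the host */
	uint32_t rp;	/* [buffer_start + 4] tail, written by the target */
	uint8_t data[];	/* [buffer_start + 8 ..) circular data area */
};

static int flash_write_example(struct target *target, struct working_area *fifo,
	struct working_area *algo, const uint8_t *data, uint32_t word_count)
{
	return target_run_flash_async_algorithm(target,
		data, word_count, 4,	/* 4-byte blocks */
		0, NULL, 0, NULL,	/* params omitted in this sketch */
		fifo->address, fifo->size,
		algo->address, 0,	/* exit_point 0: algorithm breaks itself */
		NULL);			/* arch_info: target specific */
}
#endif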
1087 int target_run_read_async_algorithm(struct target *target,
1088 uint8_t *buffer, uint32_t count, int block_size,
1089 int num_mem_params, struct mem_param *mem_params,
1090 int num_reg_params, struct reg_param *reg_params,
1091 uint32_t buffer_start, uint32_t buffer_size,
1092 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1094 int retval;
1095 int timeout = 0;
1097 const uint8_t *buffer_orig = buffer;
1099 /* Set up working area. First word is write pointer, second word is read pointer,
1100 * rest is fifo data area. */
1101 uint32_t wp_addr = buffer_start;
1102 uint32_t rp_addr = buffer_start + 4;
1103 uint32_t fifo_start_addr = buffer_start + 8;
1104 uint32_t fifo_end_addr = buffer_start + buffer_size;
1106 uint32_t wp = fifo_start_addr;
1107 uint32_t rp = fifo_start_addr;
1109 /* validate block_size is 2^n */
1110 assert(IS_PWR_OF_2(block_size));
1112 retval = target_write_u32(target, wp_addr, wp);
1113 if (retval != ERROR_OK)
1114 return retval;
1115 retval = target_write_u32(target, rp_addr, rp);
1116 if (retval != ERROR_OK)
1117 return retval;
1119 /* Start up algorithm on target */
1120 retval = target_start_algorithm(target, num_mem_params, mem_params,
1121 num_reg_params, reg_params,
1122 entry_point,
1123 exit_point,
1124 arch_info);
1126 if (retval != ERROR_OK) {
1127 LOG_ERROR("error starting target flash read algorithm");
1128 return retval;
1131 while (count > 0) {
1132 retval = target_read_u32(target, wp_addr, &wp);
1133 if (retval != ERROR_OK) {
1134 LOG_ERROR("failed to get write pointer");
1135 break;
1138 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1139 (size_t)(buffer - buffer_orig), count, wp, rp);
1141 if (wp == 0) {
1142 LOG_ERROR("flash read algorithm aborted by target");
1143 retval = ERROR_FLASH_OPERATION_FAILED;
1144 break;
1147 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1148 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1149 break;
1152 /* Count the number of bytes available in the fifo without
1153 * crossing the wrap around. */
1154 uint32_t thisrun_bytes;
1155 if (wp >= rp)
1156 thisrun_bytes = wp - rp;
1157 else
1158 thisrun_bytes = fifo_end_addr - rp;
1160 if (thisrun_bytes == 0) {
1161 /* Throttle polling a bit if transfer is (much) faster than flash
1162 * reading. The exact delay shouldn't matter as long as it's
1163 * less than buffer size / flash speed. This is very unlikely to
1164 * run when using high latency connections such as USB. */
1165 alive_sleep(2);
1167 /* To stop an infinite loop on some targets, check and increment a timeout.
1168 * This issue was observed on a Stellaris using the new ICDI interface. */
1169 if (timeout++ >= 2500) {
1170 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1171 return ERROR_FLASH_OPERATION_FAILED;
1173 continue;
1176 /* Reset our timeout */
1177 timeout = 0;
1179 /* Limit to the amount of data we actually want to read */
1180 if (thisrun_bytes > count * block_size)
1181 thisrun_bytes = count * block_size;
1183 /* Force end of large blocks to be word aligned */
1184 if (thisrun_bytes >= 16)
1185 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1187 /* Read data from fifo */
1188 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1189 if (retval != ERROR_OK)
1190 break;
1192 /* Update counters and wrap read pointer */
1193 buffer += thisrun_bytes;
1194 count -= thisrun_bytes / block_size;
1195 rp += thisrun_bytes;
1196 if (rp >= fifo_end_addr)
1197 rp = fifo_start_addr;
1199 /* Store updated read pointer to target */
1200 retval = target_write_u32(target, rp_addr, rp);
1201 if (retval != ERROR_OK)
1202 break;
1204 /* Avoid GDB timeouts */
1205 keep_alive();
1209 if (retval != ERROR_OK) {
1210 /* abort flash read algorithm on target */
1211 target_write_u32(target, rp_addr, 0);
1214 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1215 num_reg_params, reg_params,
1216 exit_point,
1217 10000,
1218 arch_info);
1220 if (retval2 != ERROR_OK) {
1221 LOG_ERROR("error waiting for target flash write algorithm");
1222 retval = retval2;
1225 if (retval == ERROR_OK) {
1226 /* check if algorithm set wp = 0 after fifo reader loop finished */
1227 retval = target_read_u32(target, wp_addr, &wp);
1228 if (retval == ERROR_OK && wp == 0) {
1229 LOG_ERROR("flash read algorithm aborted by target");
1230 retval = ERROR_FLASH_OPERATION_FAILED;
1234 return retval;
1237 int target_read_memory(struct target *target,
1238 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1240 if (!target_was_examined(target)) {
1241 LOG_ERROR("Target not examined yet");
1242 return ERROR_FAIL;
1244 if (!target->type->read_memory) {
1245 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1246 return ERROR_FAIL;
1248 return target->type->read_memory(target, address, size, count, buffer);
1251 int target_read_phys_memory(struct target *target,
1252 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1254 if (!target_was_examined(target)) {
1255 LOG_ERROR("Target not examined yet");
1256 return ERROR_FAIL;
1258 if (!target->type->read_phys_memory) {
1259 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1260 return ERROR_FAIL;
1262 return target->type->read_phys_memory(target, address, size, count, buffer);
1265 int target_write_memory(struct target *target,
1266 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1268 if (!target_was_examined(target)) {
1269 LOG_ERROR("Target not examined yet");
1270 return ERROR_FAIL;
1272 if (!target->type->write_memory) {
1273 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1274 return ERROR_FAIL;
1276 return target->type->write_memory(target, address, size, count, buffer);
1279 int target_write_phys_memory(struct target *target,
1280 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1282 if (!target_was_examined(target)) {
1283 LOG_ERROR("Target not examined yet");
1284 return ERROR_FAIL;
1286 if (!target->type->write_phys_memory) {
1287 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1288 return ERROR_FAIL;
1290 return target->type->write_phys_memory(target, address, size, count, buffer);
1293 int target_add_breakpoint(struct target *target,
1294 struct breakpoint *breakpoint)
1296 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1297 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1298 return ERROR_TARGET_NOT_HALTED;
1300 return target->type->add_breakpoint(target, breakpoint);
1303 int target_add_context_breakpoint(struct target *target,
1304 struct breakpoint *breakpoint)
1306 if (target->state != TARGET_HALTED) {
1307 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1308 return ERROR_TARGET_NOT_HALTED;
1310 return target->type->add_context_breakpoint(target, breakpoint);
1313 int target_add_hybrid_breakpoint(struct target *target,
1314 struct breakpoint *breakpoint)
1316 if (target->state != TARGET_HALTED) {
1317 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1318 return ERROR_TARGET_NOT_HALTED;
1320 return target->type->add_hybrid_breakpoint(target, breakpoint);
1323 int target_remove_breakpoint(struct target *target,
1324 struct breakpoint *breakpoint)
1326 return target->type->remove_breakpoint(target, breakpoint);
1329 int target_add_watchpoint(struct target *target,
1330 struct watchpoint *watchpoint)
1332 if (target->state != TARGET_HALTED) {
1333 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1334 return ERROR_TARGET_NOT_HALTED;
1336 return target->type->add_watchpoint(target, watchpoint);
1338 int target_remove_watchpoint(struct target *target,
1339 struct watchpoint *watchpoint)
1341 return target->type->remove_watchpoint(target, watchpoint);
1343 int target_hit_watchpoint(struct target *target,
1344 struct watchpoint **hit_watchpoint)
1346 if (target->state != TARGET_HALTED) {
1347 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1348 return ERROR_TARGET_NOT_HALTED;
1351 if (!target->type->hit_watchpoint) {
1352 /* For backward compatibility, if hit_watchpoint is not implemented,
1353 * return ERROR_FAIL so that gdb_server will not report bogus
1354 * information. */
1355 return ERROR_FAIL;
1358 return target->type->hit_watchpoint(target, hit_watchpoint);
1361 const char *target_get_gdb_arch(struct target *target)
1363 if (!target->type->get_gdb_arch)
1364 return NULL;
1365 return target->type->get_gdb_arch(target);
1368 int target_get_gdb_reg_list(struct target *target,
1369 struct reg **reg_list[], int *reg_list_size,
1370 enum target_register_class reg_class)
1372 int result = ERROR_FAIL;
1374 if (!target_was_examined(target)) {
1375 LOG_ERROR("Target not examined yet");
1376 goto done;
1379 result = target->type->get_gdb_reg_list(target, reg_list,
1380 reg_list_size, reg_class);
1382 done:
1383 if (result != ERROR_OK) {
1384 *reg_list = NULL;
1385 *reg_list_size = 0;
1387 return result;
1390 int target_get_gdb_reg_list_noread(struct target *target,
1391 struct reg **reg_list[], int *reg_list_size,
1392 enum target_register_class reg_class)
1394 if (target->type->get_gdb_reg_list_noread &&
1395 target->type->get_gdb_reg_list_noread(target, reg_list,
1396 reg_list_size, reg_class) == ERROR_OK)
1397 return ERROR_OK;
1398 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1401 bool target_supports_gdb_connection(struct target *target)
1404 * exclude all the targets that don't provide get_gdb_reg_list
1405 * or that have explicit gdb_max_connections == 0
1407 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1410 int target_step(struct target *target,
1411 int current, target_addr_t address, int handle_breakpoints)
1413 int retval;
1415 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1417 retval = target->type->step(target, current, address, handle_breakpoints);
1418 if (retval != ERROR_OK)
1419 return retval;
1421 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1423 return retval;
1426 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1428 if (target->state != TARGET_HALTED) {
1429 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1430 return ERROR_TARGET_NOT_HALTED;
1432 return target->type->get_gdb_fileio_info(target, fileio_info);
1435 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1437 if (target->state != TARGET_HALTED) {
1438 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1439 return ERROR_TARGET_NOT_HALTED;
1441 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1444 target_addr_t target_address_max(struct target *target)
1446 unsigned bits = target_address_bits(target);
1447 if (sizeof(target_addr_t) * 8 == bits)
1448 return (target_addr_t) -1;
1449 else
1450 return (((target_addr_t) 1) << bits) - 1;
1453 unsigned target_address_bits(struct target *target)
1455 if (target->type->address_bits)
1456 return target->type->address_bits(target);
1457 return 32;
1460 unsigned int target_data_bits(struct target *target)
1462 if (target->type->data_bits)
1463 return target->type->data_bits(target);
1464 return 32;
1467 static int target_profiling(struct target *target, uint32_t *samples,
1468 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1470 return target->type->profiling(target, samples, max_num_samples,
1471 num_samples, seconds);
1474 static int handle_target(void *priv);
1476 static int target_init_one(struct command_context *cmd_ctx,
1477 struct target *target)
1479 target_reset_examined(target);
1481 struct target_type *type = target->type;
1482 if (!type->examine)
1483 type->examine = default_examine;
1485 if (!type->check_reset)
1486 type->check_reset = default_check_reset;
1488 assert(type->init_target);
1490 int retval = type->init_target(cmd_ctx, target);
1491 if (retval != ERROR_OK) {
1492 LOG_ERROR("target '%s' init failed", target_name(target));
1493 return retval;
1496 /* Sanity-check MMU support ... stub in what we must, to help
1497 * implement it in stages, but warn if we need to do so.
1499 if (type->mmu) {
1500 if (!type->virt2phys) {
1501 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1502 type->virt2phys = identity_virt2phys;
1504 } else {
1505 /* Make sure no-MMU targets all behave the same: make no
1506 * distinction between physical and virtual addresses, and
1507 * ensure that virt2phys() is always an identity mapping.
1509 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1510 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1512 type->mmu = no_mmu;
1513 type->write_phys_memory = type->write_memory;
1514 type->read_phys_memory = type->read_memory;
1515 type->virt2phys = identity_virt2phys;
1518 if (!target->type->read_buffer)
1519 target->type->read_buffer = target_read_buffer_default;
1521 if (!target->type->write_buffer)
1522 target->type->write_buffer = target_write_buffer_default;
1524 if (!target->type->get_gdb_fileio_info)
1525 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1527 if (!target->type->gdb_fileio_end)
1528 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1530 if (!target->type->profiling)
1531 target->type->profiling = target_profiling_default;
1533 return ERROR_OK;
1536 static int target_init(struct command_context *cmd_ctx)
1538 struct target *target;
1539 int retval;
1541 for (target = all_targets; target; target = target->next) {
1542 retval = target_init_one(cmd_ctx, target);
1543 if (retval != ERROR_OK)
1544 return retval;
1547 if (!all_targets)
1548 return ERROR_OK;
1550 retval = target_register_user_commands(cmd_ctx);
1551 if (retval != ERROR_OK)
1552 return retval;
1554 retval = target_register_timer_callback(&handle_target,
1555 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1556 if (retval != ERROR_OK)
1557 return retval;
1559 return ERROR_OK;
1562 COMMAND_HANDLER(handle_target_init_command)
1564 int retval;
1566 if (CMD_ARGC != 0)
1567 return ERROR_COMMAND_SYNTAX_ERROR;
1569 static bool target_initialized;
1570 if (target_initialized) {
1571 LOG_INFO("'target init' has already been called");
1572 return ERROR_OK;
1574 target_initialized = true;
1576 retval = command_run_line(CMD_CTX, "init_targets");
1577 if (retval != ERROR_OK)
1578 return retval;
1580 retval = command_run_line(CMD_CTX, "init_target_events");
1581 if (retval != ERROR_OK)
1582 return retval;
1584 retval = command_run_line(CMD_CTX, "init_board");
1585 if (retval != ERROR_OK)
1586 return retval;
1588 LOG_DEBUG("Initializing targets...");
1589 return target_init(CMD_CTX);
1592 int target_register_event_callback(int (*callback)(struct target *target,
1593 enum target_event event, void *priv), void *priv)
1595 struct target_event_callback **callbacks_p = &target_event_callbacks;
1597 if (!callback)
1598 return ERROR_COMMAND_SYNTAX_ERROR;
1600 if (*callbacks_p) {
1601 while ((*callbacks_p)->next)
1602 callbacks_p = &((*callbacks_p)->next);
1603 callbacks_p = &((*callbacks_p)->next);
1606 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1607 (*callbacks_p)->callback = callback;
1608 (*callbacks_p)->priv = priv;
1609 (*callbacks_p)->next = NULL;
1611 return ERROR_OK;
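/*
 * Usage sketch (editorial addition): registering an event callback that
 * reacts to the examination events fired by target_examine_one(). The
 * callback name is hypothetical; the signature is the one required by
 * target_register_event_callback() above.
 */
#if 0 /* example only */
static int on_examine_end_example(struct target *target,
	enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_EXAMINE_END)
		LOG_TARGET_DEBUG(target, "examination finished, ready for use");
	return ERROR_OK;
}

/* somewhere during initialization:
 * target_register_event_callback(on_examine_end_example, NULL); */
#endif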
1614 int target_register_reset_callback(int (*callback)(struct target *target,
1615 enum target_reset_mode reset_mode, void *priv), void *priv)
1617 struct target_reset_callback *entry;
1619 if (!callback)
1620 return ERROR_COMMAND_SYNTAX_ERROR;
1622 entry = malloc(sizeof(struct target_reset_callback));
1623 if (!entry) {
1624 LOG_ERROR("error allocating buffer for reset callback entry");
1625 return ERROR_COMMAND_SYNTAX_ERROR;
1628 entry->callback = callback;
1629 entry->priv = priv;
1630 list_add(&entry->list, &target_reset_callback_list);
1633 return ERROR_OK;
1636 int target_register_trace_callback(int (*callback)(struct target *target,
1637 size_t len, uint8_t *data, void *priv), void *priv)
1639 struct target_trace_callback *entry;
1641 if (!callback)
1642 return ERROR_COMMAND_SYNTAX_ERROR;
1644 entry = malloc(sizeof(struct target_trace_callback));
1645 if (!entry) {
1646 LOG_ERROR("error allocating buffer for trace callback entry");
1647 return ERROR_COMMAND_SYNTAX_ERROR;
1650 entry->callback = callback;
1651 entry->priv = priv;
1652 list_add(&entry->list, &target_trace_callback_list);
1655 return ERROR_OK;
1658 int target_register_timer_callback(int (*callback)(void *priv),
1659 unsigned int time_ms, enum target_timer_type type, void *priv)
1661 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1663 if (!callback)
1664 return ERROR_COMMAND_SYNTAX_ERROR;
1666 if (*callbacks_p) {
1667 while ((*callbacks_p)->next)
1668 callbacks_p = &((*callbacks_p)->next);
1669 callbacks_p = &((*callbacks_p)->next);
1672 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1673 (*callbacks_p)->callback = callback;
1674 (*callbacks_p)->type = type;
1675 (*callbacks_p)->time_ms = time_ms;
1676 (*callbacks_p)->removed = false;
1678 (*callbacks_p)->when = timeval_ms() + time_ms;
1679 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1684 return ERROR_OK;
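/*
 * Usage sketch (editorial addition): a periodic timer callback, registered
 * the same way target_init() registers handle_target() further below. The
 * 500 ms period and the callback name are illustrative only.
 */
#if 0 /* example only */
static int heartbeat_example(void *priv)
{
	LOG_DEBUG("timer tick");
	return ERROR_OK;
}

/* target_register_timer_callback(heartbeat_example, 500,
 *	TARGET_TIMER_TYPE_PERIODIC, NULL); */
#endif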
1687 int target_unregister_event_callback(int (*callback)(struct target *target,
1688 enum target_event event, void *priv), void *priv)
1690 struct target_event_callback **p = &target_event_callbacks;
1691 struct target_event_callback *c = target_event_callbacks;
1693 if (!callback)
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1696 while (c) {
1697 struct target_event_callback *next = c->next;
1698 if ((c->callback == callback) && (c->priv == priv)) {
1699 *p = next;
1700 free(c);
1701 return ERROR_OK;
1702 } else
1703 p = &(c->next);
1704 c = next;
1707 return ERROR_OK;
1710 int target_unregister_reset_callback(int (*callback)(struct target *target,
1711 enum target_reset_mode reset_mode, void *priv), void *priv)
1713 struct target_reset_callback *entry;
1715 if (!callback)
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1718 list_for_each_entry(entry, &target_reset_callback_list, list) {
1719 if (entry->callback == callback && entry->priv == priv) {
1720 list_del(&entry->list);
1721 free(entry);
1722 break;
1726 return ERROR_OK;
1729 int target_unregister_trace_callback(int (*callback)(struct target *target,
1730 size_t len, uint8_t *data, void *priv), void *priv)
1732 struct target_trace_callback *entry;
1734 if (!callback)
1735 return ERROR_COMMAND_SYNTAX_ERROR;
1737 list_for_each_entry(entry, &target_trace_callback_list, list) {
1738 if (entry->callback == callback && entry->priv == priv) {
1739 list_del(&entry->list);
1740 free(entry);
1741 break;
1745 return ERROR_OK;
1748 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1750 if (!callback)
1751 return ERROR_COMMAND_SYNTAX_ERROR;
1753 for (struct target_timer_callback *c = target_timer_callbacks;
1754 c; c = c->next) {
1755 if ((c->callback == callback) && (c->priv == priv)) {
1756 c->removed = true;
1757 return ERROR_OK;
1761 return ERROR_FAIL;
1764 int target_call_event_callbacks(struct target *target, enum target_event event)
1766 struct target_event_callback *callback = target_event_callbacks;
1767 struct target_event_callback *next_callback;
1769 if (event == TARGET_EVENT_HALTED) {
1770 /* execute early halted first */
1771 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1774 LOG_DEBUG("target event %i (%s) for core %s", event,
1775 target_event_name(event),
1776 target_name(target));
1778 target_handle_event(target, event);
1780 while (callback) {
1781 next_callback = callback->next;
1782 callback->callback(target, event, callback->priv);
1783 callback = next_callback;
1786 return ERROR_OK;
1789 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1791 struct target_reset_callback *callback;
1793 LOG_DEBUG("target reset %i (%s)", reset_mode,
1794 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1796 list_for_each_entry(callback, &target_reset_callback_list, list)
1797 callback->callback(target, reset_mode, callback->priv);
1799 return ERROR_OK;
1802 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1804 struct target_trace_callback *callback;
1806 list_for_each_entry(callback, &target_trace_callback_list, list)
1807 callback->callback(target, len, data, callback->priv);
1809 return ERROR_OK;
1812 static int target_timer_callback_periodic_restart(
1813 struct target_timer_callback *cb, int64_t *now)
1815 cb->when = *now + cb->time_ms;
1816 return ERROR_OK;
1819 static int target_call_timer_callback(struct target_timer_callback *cb,
1820 int64_t *now)
1822 cb->callback(cb->priv);
1824 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1825 return target_timer_callback_periodic_restart(cb, now);
1827 return target_unregister_timer_callback(cb->callback, cb->priv);
1830 static int target_call_timer_callbacks_check_time(int checktime)
1832 static bool callback_processing;
1834 /* Do not allow nesting */
1835 if (callback_processing)
1836 return ERROR_OK;
1838 callback_processing = true;
1840 keep_alive();
1842 int64_t now = timeval_ms();
1844 /* Initialize to a default value that's a ways into the future.
1845 * The loop below will make it closer to now if there are
1846 * callbacks that want to be called sooner. */
1847 target_timer_next_event_value = now + 1000;
1849 /* Store an address of the place containing a pointer to the
1850 * next item; initially, that's a standalone "root of the
1851 * list" variable. */
1852 struct target_timer_callback **callback = &target_timer_callbacks;
1853 while (callback && *callback) {
1854 if ((*callback)->removed) {
1855 struct target_timer_callback *p = *callback;
1856 *callback = (*callback)->next;
1857 free(p);
1858 continue;
1861 bool call_it = (*callback)->callback &&
1862 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1863 now >= (*callback)->when);
1865 if (call_it)
1866 target_call_timer_callback(*callback, &now);
1868 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1869 target_timer_next_event_value = (*callback)->when;
1871 callback = &(*callback)->next;
1874 callback_processing = false;
1875 return ERROR_OK;
1878 int target_call_timer_callbacks(void)
1880 return target_call_timer_callbacks_check_time(1);
1883 /* invoke periodic callbacks immediately */
1884 int target_call_timer_callbacks_now(void)
1886 return target_call_timer_callbacks_check_time(0);
1889 int64_t target_timer_next_event(void)
1891 return target_timer_next_event_value;
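/* Illustrative sketch, not part of the build: a periodic callback is normally
 * installed with target_register_timer_callback() from target.h (parameter
 * order assumed) and removed with target_unregister_timer_callback() above.
 *
 *   static int my_tick(void *priv)
 *   {
 *       // invoked from target_call_timer_callbacks(), roughly every 100 ms
 *       return ERROR_OK;
 *   }
 *
 *   target_register_timer_callback(my_tick, 100, TARGET_TIMER_TYPE_PERIODIC, NULL);
 *   ...
 *   target_unregister_timer_callback(my_tick, NULL);
 */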
1894 /* Prints the working area layout for debug purposes */
1895 static void print_wa_layout(struct target *target)
1897 struct working_area *c = target->working_areas;
1899 while (c) {
1900 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1901 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1902 c->address, c->address + c->size - 1, c->size);
1903 c = c->next;
1907 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1908 static void target_split_working_area(struct working_area *area, uint32_t size)
1910 assert(area->free); /* Shouldn't split an allocated area */
1911 assert(size <= area->size); /* Caller should guarantee this */
1913 /* Split only if not already the right size */
1914 if (size < area->size) {
1915 struct working_area *new_wa = malloc(sizeof(*new_wa));
1917 if (!new_wa)
1918 return;
1920 new_wa->next = area->next;
1921 new_wa->size = area->size - size;
1922 new_wa->address = area->address + size;
1923 new_wa->backup = NULL;
1924 new_wa->user = NULL;
1925 new_wa->free = true;
1927 area->next = new_wa;
1928 area->size = size;
1930 		/* If backup memory was allocated to this area, it now has the wrong
1931 		 * size, so free it; it will be reallocated if/when needed */
1932 free(area->backup);
1933 area->backup = NULL;
1937 /* Merge all adjacent free areas into one */
1938 static void target_merge_working_areas(struct target *target)
1940 struct working_area *c = target->working_areas;
1942 while (c && c->next) {
1943 assert(c->next->address == c->address + c->size); /* This is an invariant */
1945 /* Find two adjacent free areas */
1946 if (c->free && c->next->free) {
1947 /* Merge the last into the first */
1948 c->size += c->next->size;
1950 /* Remove the last */
1951 struct working_area *to_be_freed = c->next;
1952 c->next = c->next->next;
1953 free(to_be_freed->backup);
1954 free(to_be_freed);
1956 			/* If backup memory was allocated to the remaining area, it has
1957 			 * the wrong size now */
1958 free(c->backup);
1959 c->backup = NULL;
1960 } else {
1961 c = c->next;
1966 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1968 /* Reevaluate working area address based on MMU state*/
1969 if (!target->working_areas) {
1970 int retval;
1971 int enabled;
1973 retval = target->type->mmu(target, &enabled);
1974 if (retval != ERROR_OK)
1975 return retval;
1977 if (!enabled) {
1978 if (target->working_area_phys_spec) {
1979 LOG_DEBUG("MMU disabled, using physical "
1980 "address for working memory " TARGET_ADDR_FMT,
1981 target->working_area_phys);
1982 target->working_area = target->working_area_phys;
1983 } else {
1984 LOG_ERROR("No working memory available. "
1985 "Specify -work-area-phys to target.");
1986 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1988 } else {
1989 if (target->working_area_virt_spec) {
1990 LOG_DEBUG("MMU enabled, using virtual "
1991 "address for working memory " TARGET_ADDR_FMT,
1992 target->working_area_virt);
1993 target->working_area = target->working_area_virt;
1994 } else {
1995 LOG_ERROR("No working memory available. "
1996 "Specify -work-area-virt to target.");
1997 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2001 /* Set up initial working area on first call */
2002 struct working_area *new_wa = malloc(sizeof(*new_wa));
2003 if (new_wa) {
2004 new_wa->next = NULL;
2005 new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
2006 new_wa->address = target->working_area;
2007 new_wa->backup = NULL;
2008 new_wa->user = NULL;
2009 new_wa->free = true;
2012 target->working_areas = new_wa;
2015 /* only allocate multiples of 4 byte */
2016 size = ALIGN_UP(size, 4);
2018 struct working_area *c = target->working_areas;
2020 /* Find the first large enough working area */
2021 while (c) {
2022 if (c->free && c->size >= size)
2023 break;
2024 c = c->next;
2027 if (!c)
2028 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2030 /* Split the working area into the requested size */
2031 target_split_working_area(c, size);
2033 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2034 size, c->address);
2036 if (target->backup_working_area) {
2037 if (!c->backup) {
2038 c->backup = malloc(c->size);
2039 if (!c->backup)
2040 return ERROR_FAIL;
2043 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2044 if (retval != ERROR_OK)
2045 return retval;
2048 /* mark as used, and return the new (reused) area */
2049 c->free = false;
2050 *area = c;
2052 /* user pointer */
2053 c->user = area;
2055 print_wa_layout(target);
2057 return ERROR_OK;
2060 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2062 int retval;
2064 retval = target_alloc_working_area_try(target, size, area);
2065 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2066 		LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2067 return retval;
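/* Illustrative sketch, not part of the build: typical working-area usage by a
 * flash or algorithm driver, using only functions defined in this file
 * (stub_code and stub_size are hypothetical).
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, stub_size, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;              // no working area configured or too small
 *   retval = target_write_buffer(target, wa->address, stub_size, stub_code);
 *   ...
 *   target_free_working_area(target, wa);   // restores backed-up memory, if any
 */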
2071 static int target_restore_working_area(struct target *target, struct working_area *area)
2073 int retval = ERROR_OK;
2075 if (target->backup_working_area && area->backup) {
2076 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2077 if (retval != ERROR_OK)
2078 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2079 area->size, area->address);
2082 return retval;
2085 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2086 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2088 if (!area || area->free)
2089 return ERROR_OK;
2091 int retval = ERROR_OK;
2092 if (restore) {
2093 retval = target_restore_working_area(target, area);
2094 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2095 if (retval != ERROR_OK)
2096 return retval;
2099 area->free = true;
2101 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2102 area->size, area->address);
2104 /* mark user pointer invalid */
2105 /* TODO: Is this really safe? It points to some previous caller's memory.
2106 * How could we know that the area pointer is still in that place and not
2107 * some other vital data? What's the purpose of this, anyway? */
2108 *area->user = NULL;
2109 area->user = NULL;
2111 target_merge_working_areas(target);
2113 print_wa_layout(target);
2115 return retval;
2118 int target_free_working_area(struct target *target, struct working_area *area)
2120 return target_free_working_area_restore(target, area, 1);
2123 /* Free resources and restore memory; if restoring memory fails,
2124  * free up the resources anyway
2126 static void target_free_all_working_areas_restore(struct target *target, int restore)
2128 struct working_area *c = target->working_areas;
2130 LOG_DEBUG("freeing all working areas");
2132 /* Loop through all areas, restoring the allocated ones and marking them as free */
2133 while (c) {
2134 if (!c->free) {
2135 if (restore)
2136 target_restore_working_area(target, c);
2137 c->free = true;
2138 *c->user = NULL; /* Same as above */
2139 c->user = NULL;
2141 c = c->next;
2144 /* Run a merge pass to combine all areas into one */
2145 target_merge_working_areas(target);
2147 print_wa_layout(target);
2150 void target_free_all_working_areas(struct target *target)
2152 target_free_all_working_areas_restore(target, 1);
2154 /* Now we have none or only one working area marked as free */
2155 if (target->working_areas) {
2156 /* Free the last one to allow on-the-fly moving and resizing */
2157 free(target->working_areas->backup);
2158 free(target->working_areas);
2159 target->working_areas = NULL;
2163 /* Find the largest number of bytes that can be allocated */
2164 uint32_t target_get_working_area_avail(struct target *target)
2166 struct working_area *c = target->working_areas;
2167 uint32_t max_size = 0;
2169 if (!c)
2170 return ALIGN_DOWN(target->working_area_size, 4);
2172 while (c) {
2173 if (c->free && max_size < c->size)
2174 max_size = c->size;
2176 c = c->next;
2179 return max_size;
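/* Illustrative sketch, not part of the build: callers that can scale their
 * buffers (e.g. flash write algorithms) usually size the request against the
 * largest free block instead of failing outright (preferred_size is
 * hypothetical).
 *
 *   uint32_t buf_size = preferred_size;
 *   uint32_t avail = target_get_working_area_avail(target);
 *   if (buf_size > avail)
 *       buf_size = avail;
 *   if (target_alloc_working_area(target, buf_size, &wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 */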
2182 static void target_destroy(struct target *target)
2184 breakpoint_remove_all(target);
2185 watchpoint_remove_all(target);
2187 if (target->type->deinit_target)
2188 target->type->deinit_target(target);
2190 if (target->semihosting)
2191 free(target->semihosting->basedir);
2192 free(target->semihosting);
2194 jtag_unregister_event_callback(jtag_enable_callback, target);
2196 struct target_event_action *teap = target->event_action;
2197 while (teap) {
2198 struct target_event_action *next = teap->next;
2199 Jim_DecrRefCount(teap->interp, teap->body);
2200 free(teap);
2201 teap = next;
2204 target_free_all_working_areas(target);
2206 /* release the targets SMP list */
2207 if (target->smp) {
2208 struct target_list *head, *tmp;
2210 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2211 list_del(&head->lh);
2212 head->target->smp = 0;
2213 free(head);
2215 if (target->smp_targets != &empty_smp_targets)
2216 free(target->smp_targets);
2217 target->smp = 0;
2220 rtos_destroy(target);
2222 free(target->gdb_port_override);
2223 free(target->type);
2224 free(target->trace_info);
2225 free(target->fileio_info);
2226 free(target->cmd_name);
2227 free(target);
2230 void target_quit(void)
2232 struct target_event_callback *pe = target_event_callbacks;
2233 while (pe) {
2234 struct target_event_callback *t = pe->next;
2235 free(pe);
2236 pe = t;
2238 target_event_callbacks = NULL;
2240 struct target_timer_callback *pt = target_timer_callbacks;
2241 while (pt) {
2242 struct target_timer_callback *t = pt->next;
2243 free(pt);
2244 pt = t;
2246 target_timer_callbacks = NULL;
2248 for (struct target *target = all_targets; target;) {
2249 struct target *tmp;
2251 tmp = target->next;
2252 target_destroy(target);
2253 target = tmp;
2256 all_targets = NULL;
2259 int target_arch_state(struct target *target)
2261 int retval;
2262 if (!target) {
2263 LOG_WARNING("No target has been configured");
2264 return ERROR_OK;
2267 if (target->state != TARGET_HALTED)
2268 return ERROR_OK;
2270 retval = target->type->arch_state(target);
2271 return retval;
2274 static int target_get_gdb_fileio_info_default(struct target *target,
2275 struct gdb_fileio_info *fileio_info)
2277 	/* If the target does not support semihosting, it has no need to
2278 	   provide a .get_gdb_fileio_info callback. This default just
2279 	   returns ERROR_FAIL, and gdb_server will report "Txx"
2280 	   (target halted) every time. */
2281 return ERROR_FAIL;
2284 static int target_gdb_fileio_end_default(struct target *target,
2285 int retcode, int fileio_errno, bool ctrl_c)
2287 return ERROR_OK;
2290 int target_profiling_default(struct target *target, uint32_t *samples,
2291 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2293 struct timeval timeout, now;
2295 gettimeofday(&timeout, NULL);
2296 timeval_add_time(&timeout, seconds, 0);
2298 LOG_INFO("Starting profiling. Halting and resuming the"
2299 " target as often as we can...");
2301 uint32_t sample_count = 0;
2302 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2303 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2305 int retval = ERROR_OK;
2306 for (;;) {
2307 target_poll(target);
2308 if (target->state == TARGET_HALTED) {
2309 uint32_t t = buf_get_u32(reg->value, 0, 32);
2310 samples[sample_count++] = t;
2311 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2312 retval = target_resume(target, 1, 0, 0, 0);
2313 target_poll(target);
2314 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2315 } else if (target->state == TARGET_RUNNING) {
2316 /* We want to quickly sample the PC. */
2317 retval = target_halt(target);
2318 } else {
2319 LOG_INFO("Target not halted or running");
2320 retval = ERROR_OK;
2321 break;
2324 if (retval != ERROR_OK)
2325 break;
2327 gettimeofday(&now, NULL);
2328 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2329 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2330 break;
2334 *num_samples = sample_count;
2335 return retval;
2338 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2339  * access respectively; otherwise the data is transferred with the largest
2340  * access size possible
2342 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2344 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2345 size, address);
2347 if (!target_was_examined(target)) {
2348 LOG_ERROR("Target not examined yet");
2349 return ERROR_FAIL;
2352 if (size == 0)
2353 return ERROR_OK;
2355 if ((address + size - 1) < address) {
2356 /* GDB can request this when e.g. PC is 0xfffffffc */
2357 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2358 address,
2359 size);
2360 return ERROR_FAIL;
2363 return target->type->write_buffer(target, address, size, buffer);
2366 static int target_write_buffer_default(struct target *target,
2367 target_addr_t address, uint32_t count, const uint8_t *buffer)
2369 uint32_t size;
2370 unsigned int data_bytes = target_data_bits(target) / 8;
2372 /* Align up to maximum bytes. The loop condition makes sure the next pass
2373 * will have something to do with the size we leave to it. */
2374 for (size = 1;
2375 size < data_bytes && count >= size * 2 + (address & size);
2376 size *= 2) {
2377 if (address & size) {
2378 int retval = target_write_memory(target, address, size, 1, buffer);
2379 if (retval != ERROR_OK)
2380 return retval;
2381 address += size;
2382 count -= size;
2383 buffer += size;
2387 /* Write the data with as large access size as possible. */
2388 for (; size > 0; size /= 2) {
2389 uint32_t aligned = count - count % size;
2390 if (aligned > 0) {
2391 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2392 if (retval != ERROR_OK)
2393 return retval;
2394 address += aligned;
2395 count -= aligned;
2396 buffer += aligned;
2400 return ERROR_OK;
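/* Worked example, illustrative only: with data_bytes = 4, a write of
 * count = 11 bytes starting at address 0x1001 proceeds as
 *   0x1001: one 1-byte access  (address & 1 set) -> address 0x1002, count 10
 *   0x1002: one 2-byte access  (address & 2 set) -> address 0x1004, count 8
 * and the second loop then issues two 4-byte accesses, leaving no
 * halfword/byte remainder. */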
2403 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2404  * access respectively; otherwise the data is transferred with the largest
2405  * access size possible
2407 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2409 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2410 size, address);
2412 if (!target_was_examined(target)) {
2413 LOG_ERROR("Target not examined yet");
2414 return ERROR_FAIL;
2417 if (size == 0)
2418 return ERROR_OK;
2420 if ((address + size - 1) < address) {
2421 /* GDB can request this when e.g. PC is 0xfffffffc */
2422 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2423 address,
2424 size);
2425 return ERROR_FAIL;
2428 return target->type->read_buffer(target, address, size, buffer);
2431 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2433 uint32_t size;
2434 unsigned int data_bytes = target_data_bits(target) / 8;
2436 /* Align up to maximum bytes. The loop condition makes sure the next pass
2437 * will have something to do with the size we leave to it. */
2438 for (size = 1;
2439 size < data_bytes && count >= size * 2 + (address & size);
2440 size *= 2) {
2441 if (address & size) {
2442 int retval = target_read_memory(target, address, size, 1, buffer);
2443 if (retval != ERROR_OK)
2444 return retval;
2445 address += size;
2446 count -= size;
2447 buffer += size;
2451 /* Read the data with as large access size as possible. */
2452 for (; size > 0; size /= 2) {
2453 uint32_t aligned = count - count % size;
2454 if (aligned > 0) {
2455 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2456 if (retval != ERROR_OK)
2457 return retval;
2458 address += aligned;
2459 count -= aligned;
2460 buffer += aligned;
2464 return ERROR_OK;
2467 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2469 uint8_t *buffer;
2470 int retval;
2471 uint32_t i;
2472 uint32_t checksum = 0;
2473 if (!target_was_examined(target)) {
2474 LOG_ERROR("Target not examined yet");
2475 return ERROR_FAIL;
2477 if (!target->type->checksum_memory) {
2478 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2479 return ERROR_FAIL;
2482 retval = target->type->checksum_memory(target, address, size, &checksum);
2483 if (retval != ERROR_OK) {
2484 buffer = malloc(size);
2485 if (!buffer) {
2486 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2487 return ERROR_COMMAND_SYNTAX_ERROR;
2489 retval = target_read_buffer(target, address, size, buffer);
2490 if (retval != ERROR_OK) {
2491 free(buffer);
2492 return retval;
2495 /* convert to target endianness */
2496 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2497 uint32_t target_data;
2498 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2499 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2502 retval = image_calculate_checksum(buffer, size, &checksum);
2503 free(buffer);
2506 *crc = checksum;
2508 return retval;
2511 int target_blank_check_memory(struct target *target,
2512 struct target_memory_check_block *blocks, int num_blocks,
2513 uint8_t erased_value)
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2520 if (!target->type->blank_check_memory)
2521 return ERROR_NOT_IMPLEMENTED;
2523 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2526 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2528 uint8_t value_buf[8];
2529 if (!target_was_examined(target)) {
2530 LOG_ERROR("Target not examined yet");
2531 return ERROR_FAIL;
2534 int retval = target_read_memory(target, address, 8, 1, value_buf);
2536 if (retval == ERROR_OK) {
2537 *value = target_buffer_get_u64(target, value_buf);
2538 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2539 address,
2540 *value);
2541 } else {
2542 *value = 0x0;
2543 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2544 address);
2547 return retval;
2550 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2552 uint8_t value_buf[4];
2553 if (!target_was_examined(target)) {
2554 LOG_ERROR("Target not examined yet");
2555 return ERROR_FAIL;
2558 int retval = target_read_memory(target, address, 4, 1, value_buf);
2560 if (retval == ERROR_OK) {
2561 *value = target_buffer_get_u32(target, value_buf);
2562 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2563 address,
2564 *value);
2565 } else {
2566 *value = 0x0;
2567 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2568 address);
2571 return retval;
2574 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2576 uint8_t value_buf[2];
2577 if (!target_was_examined(target)) {
2578 LOG_ERROR("Target not examined yet");
2579 return ERROR_FAIL;
2582 int retval = target_read_memory(target, address, 2, 1, value_buf);
2584 if (retval == ERROR_OK) {
2585 *value = target_buffer_get_u16(target, value_buf);
2586 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2587 address,
2588 *value);
2589 } else {
2590 *value = 0x0;
2591 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2592 address);
2595 return retval;
2598 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2600 if (!target_was_examined(target)) {
2601 LOG_ERROR("Target not examined yet");
2602 return ERROR_FAIL;
2605 int retval = target_read_memory(target, address, 1, 1, value);
2607 if (retval == ERROR_OK) {
2608 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2609 address,
2610 *value);
2611 } else {
2612 *value = 0x0;
2613 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2614 address);
2617 return retval;
2620 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2622 int retval;
2623 uint8_t value_buf[8];
2624 if (!target_was_examined(target)) {
2625 LOG_ERROR("Target not examined yet");
2626 return ERROR_FAIL;
2629 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2630 address,
2631 value);
2633 target_buffer_set_u64(target, value_buf, value);
2634 retval = target_write_memory(target, address, 8, 1, value_buf);
2635 if (retval != ERROR_OK)
2636 LOG_DEBUG("failed: %i", retval);
2638 return retval;
2641 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2643 int retval;
2644 uint8_t value_buf[4];
2645 if (!target_was_examined(target)) {
2646 LOG_ERROR("Target not examined yet");
2647 return ERROR_FAIL;
2650 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2651 address,
2652 value);
2654 target_buffer_set_u32(target, value_buf, value);
2655 retval = target_write_memory(target, address, 4, 1, value_buf);
2656 if (retval != ERROR_OK)
2657 LOG_DEBUG("failed: %i", retval);
2659 return retval;
2662 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2664 int retval;
2665 uint8_t value_buf[2];
2666 if (!target_was_examined(target)) {
2667 LOG_ERROR("Target not examined yet");
2668 return ERROR_FAIL;
2671 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2672 address,
2673 value);
2675 target_buffer_set_u16(target, value_buf, value);
2676 retval = target_write_memory(target, address, 2, 1, value_buf);
2677 if (retval != ERROR_OK)
2678 LOG_DEBUG("failed: %i", retval);
2680 return retval;
2683 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2685 int retval;
2686 if (!target_was_examined(target)) {
2687 LOG_ERROR("Target not examined yet");
2688 return ERROR_FAIL;
2691 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2692 address, value);
2694 retval = target_write_memory(target, address, 1, 1, &value);
2695 if (retval != ERROR_OK)
2696 LOG_DEBUG("failed: %i", retval);
2698 return retval;
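/* Illustrative sketch, not part of the build: the u8/u16/u32/u64 helpers
 * above are convenient for read-modify-write of memory-mapped registers
 * (PERIPH_CTRL_ADDR and ENABLE_BIT are hypothetical).
 *
 *   uint32_t ctrl;
 *   int retval = target_read_u32(target, PERIPH_CTRL_ADDR, &ctrl);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   ctrl |= ENABLE_BIT;
 *   return target_write_u32(target, PERIPH_CTRL_ADDR, ctrl);
 */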
2701 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2703 int retval;
2704 uint8_t value_buf[8];
2705 if (!target_was_examined(target)) {
2706 LOG_ERROR("Target not examined yet");
2707 return ERROR_FAIL;
2710 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2711 address,
2712 value);
2714 target_buffer_set_u64(target, value_buf, value);
2715 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2716 if (retval != ERROR_OK)
2717 LOG_DEBUG("failed: %i", retval);
2719 return retval;
2722 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2724 int retval;
2725 uint8_t value_buf[4];
2726 if (!target_was_examined(target)) {
2727 LOG_ERROR("Target not examined yet");
2728 return ERROR_FAIL;
2731 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2732 address,
2733 value);
2735 target_buffer_set_u32(target, value_buf, value);
2736 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2737 if (retval != ERROR_OK)
2738 LOG_DEBUG("failed: %i", retval);
2740 return retval;
2743 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2745 int retval;
2746 uint8_t value_buf[2];
2747 if (!target_was_examined(target)) {
2748 LOG_ERROR("Target not examined yet");
2749 return ERROR_FAIL;
2752 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2753 address,
2754 value);
2756 target_buffer_set_u16(target, value_buf, value);
2757 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2758 if (retval != ERROR_OK)
2759 LOG_DEBUG("failed: %i", retval);
2761 return retval;
2764 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2766 int retval;
2767 if (!target_was_examined(target)) {
2768 LOG_ERROR("Target not examined yet");
2769 return ERROR_FAIL;
2772 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2773 address, value);
2775 retval = target_write_phys_memory(target, address, 1, 1, &value);
2776 if (retval != ERROR_OK)
2777 LOG_DEBUG("failed: %i", retval);
2779 return retval;
2782 static int find_target(struct command_invocation *cmd, const char *name)
2784 struct target *target = get_target(name);
2785 if (!target) {
2786 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2787 return ERROR_FAIL;
2789 if (!target->tap->enabled) {
2790 command_print(cmd, "Target: TAP %s is disabled, "
2791 "can't be the current target\n",
2792 target->tap->dotted_name);
2793 return ERROR_FAIL;
2796 cmd->ctx->current_target = target;
2797 if (cmd->ctx->current_target_override)
2798 cmd->ctx->current_target_override = target;
2800 return ERROR_OK;
2804 COMMAND_HANDLER(handle_targets_command)
2806 int retval = ERROR_OK;
2807 if (CMD_ARGC == 1) {
2808 retval = find_target(CMD, CMD_ARGV[0]);
2809 if (retval == ERROR_OK) {
2810 /* we're done! */
2811 return retval;
2815 unsigned int index = 0;
2816 command_print(CMD, " TargetName Type Endian TapName State ");
2817 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2818 for (struct target *target = all_targets; target; target = target->next, ++index) {
2819 const char *state;
2820 char marker = ' ';
2822 if (target->tap->enabled)
2823 state = target_state_name(target);
2824 else
2825 state = "tap-disabled";
2827 if (CMD_CTX->current_target == target)
2828 marker = '*';
2830 /* keep columns lined up to match the headers above */
2831 command_print(CMD,
2832 "%2d%c %-18s %-10s %-6s %-18s %s",
2833 index,
2834 marker,
2835 target_name(target),
2836 target_type_name(target),
2837 jim_nvp_value2name_simple(nvp_target_endian,
2838 target->endianness)->name,
2839 target->tap->dotted_name,
2840 state);
2843 return retval;
2846 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2848 static int power_dropout;
2849 static int srst_asserted;
2851 static int run_power_restore;
2852 static int run_power_dropout;
2853 static int run_srst_asserted;
2854 static int run_srst_deasserted;
2856 static int sense_handler(void)
2858 static int prev_srst_asserted;
2859 static int prev_power_dropout;
2861 int retval = jtag_power_dropout(&power_dropout);
2862 if (retval != ERROR_OK)
2863 return retval;
2865 int power_restored;
2866 power_restored = prev_power_dropout && !power_dropout;
2867 if (power_restored)
2868 run_power_restore = 1;
2870 int64_t current = timeval_ms();
2871 static int64_t last_power;
2872 bool wait_more = last_power + 2000 > current;
2873 if (power_dropout && !wait_more) {
2874 run_power_dropout = 1;
2875 last_power = current;
2878 retval = jtag_srst_asserted(&srst_asserted);
2879 if (retval != ERROR_OK)
2880 return retval;
2882 int srst_deasserted;
2883 srst_deasserted = prev_srst_asserted && !srst_asserted;
2885 static int64_t last_srst;
2886 wait_more = last_srst + 2000 > current;
2887 if (srst_deasserted && !wait_more) {
2888 run_srst_deasserted = 1;
2889 last_srst = current;
2892 if (!prev_srst_asserted && srst_asserted)
2893 run_srst_asserted = 1;
2895 prev_srst_asserted = srst_asserted;
2896 prev_power_dropout = power_dropout;
2898 if (srst_deasserted || power_restored) {
2899 /* Other than logging the event we can't do anything here.
2900 * Issuing a reset is a particularly bad idea as we might
2901 * be inside a reset already.
2905 return ERROR_OK;
2908 /* process target state changes */
2909 static int handle_target(void *priv)
2911 Jim_Interp *interp = (Jim_Interp *)priv;
2912 int retval = ERROR_OK;
2914 if (!is_jtag_poll_safe()) {
2915 /* polling is disabled currently */
2916 return ERROR_OK;
2919 /* we do not want to recurse here... */
2920 static int recursive;
2921 if (!recursive) {
2922 recursive = 1;
2923 sense_handler();
2924 /* danger! running these procedures can trigger srst assertions and power dropouts.
2925 * We need to avoid an infinite loop/recursion here and we do that by
2926 * clearing the flags after running these events.
2928 int did_something = 0;
2929 if (run_srst_asserted) {
2930 			LOG_INFO("srst assertion detected, running srst_asserted proc.");
2931 Jim_Eval(interp, "srst_asserted");
2932 did_something = 1;
2934 if (run_srst_deasserted) {
2935 Jim_Eval(interp, "srst_deasserted");
2936 did_something = 1;
2938 if (run_power_dropout) {
2939 LOG_INFO("Power dropout detected, running power_dropout proc.");
2940 Jim_Eval(interp, "power_dropout");
2941 did_something = 1;
2943 if (run_power_restore) {
2944 Jim_Eval(interp, "power_restore");
2945 did_something = 1;
2948 if (did_something) {
2949 /* clear detect flags */
2950 sense_handler();
2953 /* clear action flags */
2955 run_srst_asserted = 0;
2956 run_srst_deasserted = 0;
2957 run_power_restore = 0;
2958 run_power_dropout = 0;
2960 recursive = 0;
2963 /* Poll targets for state changes unless that's globally disabled.
2964 * Skip targets that are currently disabled.
2966 for (struct target *target = all_targets;
2967 is_jtag_poll_safe() && target;
2968 target = target->next) {
2970 if (!target_was_examined(target))
2971 continue;
2973 if (!target->tap->enabled)
2974 continue;
2976 if (target->backoff.times > target->backoff.count) {
2977 /* do not poll this time as we failed previously */
2978 target->backoff.count++;
2979 continue;
2981 target->backoff.count = 0;
2983 /* only poll target if we've got power and srst isn't asserted */
2984 if (!power_dropout && !srst_asserted) {
2985 /* polling may fail silently until the target has been examined */
2986 retval = target_poll(target);
2987 if (retval != ERROR_OK) {
2988 				/* 100ms polling interval. Increase the interval between polls up to 5000ms */
2989 if (target->backoff.times * polling_interval < 5000) {
2990 target->backoff.times *= 2;
2991 target->backoff.times++;
2994 /* Tell GDB to halt the debugger. This allows the user to
2995 * run monitor commands to handle the situation.
2997 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2999 if (target->backoff.times > 0) {
3000 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3001 target_reset_examined(target);
3002 retval = target_examine_one(target);
3003 /* Target examination could have failed due to unstable connection,
3004 * but we set the examined flag anyway to repoll it later */
3005 if (retval != ERROR_OK) {
3006 target_set_examined(target);
3007 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3008 target->backoff.times * polling_interval);
3009 return retval;
3013 /* Since we succeeded, we reset backoff count */
3014 target->backoff.times = 0;
3018 return retval;
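/* Worked example, illustrative only: with the default 100 ms polling interval
 * the backoff counter grows 0 -> 1 -> 3 -> 7 -> 15 -> 31 -> 63 across
 * consecutive poll failures (doubling is only attempted while
 * times * polling_interval < 5000 ms), so a persistently failing target is
 * eventually polled only about every 6 s; any successful poll resets the
 * counter to 0. */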
3021 COMMAND_HANDLER(handle_reg_command)
3023 LOG_DEBUG("-");
3025 struct target *target = get_current_target(CMD_CTX);
3026 if (!target_was_examined(target)) {
3027 LOG_ERROR("Target not examined yet");
3028 return ERROR_TARGET_NOT_EXAMINED;
3030 struct reg *reg = NULL;
3032 /* list all available registers for the current target */
3033 if (CMD_ARGC == 0) {
3034 struct reg_cache *cache = target->reg_cache;
3036 unsigned int count = 0;
3037 while (cache) {
3038 unsigned i;
3040 command_print(CMD, "===== %s", cache->name);
3042 for (i = 0, reg = cache->reg_list;
3043 i < cache->num_regs;
3044 i++, reg++, count++) {
3045 if (reg->exist == false || reg->hidden)
3046 continue;
3047 /* only print cached values if they are valid */
3048 if (reg->valid) {
3049 char *value = buf_to_hex_str(reg->value,
3050 reg->size);
3051 command_print(CMD,
3052 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3053 count, reg->name,
3054 reg->size, value,
3055 reg->dirty
3056 ? " (dirty)"
3057 : "");
3058 free(value);
3059 } else {
3060 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3061 count, reg->name,
3062 reg->size);
3065 cache = cache->next;
3068 return ERROR_OK;
3071 /* access a single register by its ordinal number */
3072 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3073 unsigned num;
3074 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3076 struct reg_cache *cache = target->reg_cache;
3077 unsigned int count = 0;
3078 while (cache) {
3079 unsigned i;
3080 for (i = 0; i < cache->num_regs; i++) {
3081 if (count++ == num) {
3082 reg = &cache->reg_list[i];
3083 break;
3086 if (reg)
3087 break;
3088 cache = cache->next;
3091 if (!reg) {
3092 			command_print(CMD, "%u is out of bounds, the current target "
3093 					"has only %u registers (0 - %u)", num, count, count - 1);
3094 return ERROR_FAIL;
3096 } else {
3097 /* access a single register by its name */
3098 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3100 if (!reg)
3101 goto not_found;
3104 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3106 if (!reg->exist)
3107 goto not_found;
3109 /* display a register */
3110 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3111 && (CMD_ARGV[1][0] <= '9')))) {
3112 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3113 reg->valid = false;
3115 if (!reg->valid) {
3116 int retval = reg->type->get(reg);
3117 if (retval != ERROR_OK) {
3118 LOG_ERROR("Could not read register '%s'", reg->name);
3119 return retval;
3122 char *value = buf_to_hex_str(reg->value, reg->size);
3123 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3124 free(value);
3125 return ERROR_OK;
3128 /* set register value */
3129 if (CMD_ARGC == 2) {
3130 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3131 if (!buf)
3132 return ERROR_FAIL;
3133 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3135 int retval = reg->type->set(reg, buf);
3136 if (retval != ERROR_OK) {
3137 LOG_ERROR("Could not write to register '%s'", reg->name);
3138 } else {
3139 char *value = buf_to_hex_str(reg->value, reg->size);
3140 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3141 free(value);
3144 free(buf);
3146 return retval;
3149 return ERROR_COMMAND_SYNTAX_ERROR;
3151 not_found:
3152 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3153 return ERROR_FAIL;
3156 COMMAND_HANDLER(handle_poll_command)
3158 int retval = ERROR_OK;
3159 struct target *target = get_current_target(CMD_CTX);
3161 if (CMD_ARGC == 0) {
3162 command_print(CMD, "background polling: %s",
3163 jtag_poll_get_enabled() ? "on" : "off");
3164 command_print(CMD, "TAP: %s (%s)",
3165 target->tap->dotted_name,
3166 target->tap->enabled ? "enabled" : "disabled");
3167 if (!target->tap->enabled)
3168 return ERROR_OK;
3169 retval = target_poll(target);
3170 if (retval != ERROR_OK)
3171 return retval;
3172 retval = target_arch_state(target);
3173 if (retval != ERROR_OK)
3174 return retval;
3175 } else if (CMD_ARGC == 1) {
3176 bool enable;
3177 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3178 jtag_poll_set_enabled(enable);
3179 } else
3180 return ERROR_COMMAND_SYNTAX_ERROR;
3182 return retval;
3185 COMMAND_HANDLER(handle_wait_halt_command)
3187 if (CMD_ARGC > 1)
3188 return ERROR_COMMAND_SYNTAX_ERROR;
3190 unsigned ms = DEFAULT_HALT_TIMEOUT;
3191 if (1 == CMD_ARGC) {
3192 int retval = parse_uint(CMD_ARGV[0], &ms);
3193 if (retval != ERROR_OK)
3194 return ERROR_COMMAND_SYNTAX_ERROR;
3197 struct target *target = get_current_target(CMD_CTX);
3198 return target_wait_state(target, TARGET_HALTED, ms);
3201 /* wait for target state to change. The trick here is to have a low
3202 * latency for short waits and not to suck up all the CPU time
3203 * on longer waits.
3205 * After 500ms, keep_alive() is invoked
3207 int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
3209 int retval;
3210 int64_t then = 0, cur;
3211 bool once = true;
3213 for (;;) {
3214 retval = target_poll(target);
3215 if (retval != ERROR_OK)
3216 return retval;
3217 if (target->state == state)
3218 break;
3219 cur = timeval_ms();
3220 if (once) {
3221 once = false;
3222 then = timeval_ms();
3223 LOG_DEBUG("waiting for target %s...",
3224 nvp_value2name(nvp_target_state, state)->name);
3227 if (cur-then > 500)
3228 keep_alive();
3230 if ((cur-then) > ms) {
3231 LOG_ERROR("timed out while waiting for target %s",
3232 nvp_value2name(nvp_target_state, state)->name);
3233 return ERROR_FAIL;
3237 return ERROR_OK;
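/* Illustrative sketch, not part of the build: typical use, mirroring
 * handle_wait_halt_command above and handle_halt_command below.
 *
 *   int retval = target_halt(target);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   return target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */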
3240 COMMAND_HANDLER(handle_halt_command)
3242 LOG_DEBUG("-");
3244 struct target *target = get_current_target(CMD_CTX);
3246 target->verbose_halt_msg = true;
3248 int retval = target_halt(target);
3249 if (retval != ERROR_OK)
3250 return retval;
3252 if (CMD_ARGC == 1) {
3253 unsigned wait_local;
3254 retval = parse_uint(CMD_ARGV[0], &wait_local);
3255 if (retval != ERROR_OK)
3256 return ERROR_COMMAND_SYNTAX_ERROR;
3257 if (!wait_local)
3258 return ERROR_OK;
3261 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3264 COMMAND_HANDLER(handle_soft_reset_halt_command)
3266 struct target *target = get_current_target(CMD_CTX);
3268 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3270 target_soft_reset_halt(target);
3272 return ERROR_OK;
3275 COMMAND_HANDLER(handle_reset_command)
3277 if (CMD_ARGC > 1)
3278 return ERROR_COMMAND_SYNTAX_ERROR;
3280 enum target_reset_mode reset_mode = RESET_RUN;
3281 if (CMD_ARGC == 1) {
3282 const struct nvp *n;
3283 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3284 if ((!n->name) || (n->value == RESET_UNKNOWN))
3285 return ERROR_COMMAND_SYNTAX_ERROR;
3286 reset_mode = n->value;
3289 /* reset *all* targets */
3290 return target_process_reset(CMD, reset_mode);
3294 COMMAND_HANDLER(handle_resume_command)
3296 int current = 1;
3297 if (CMD_ARGC > 1)
3298 return ERROR_COMMAND_SYNTAX_ERROR;
3300 struct target *target = get_current_target(CMD_CTX);
3302 /* with no CMD_ARGV, resume from current pc, addr = 0,
3303 * with one arguments, addr = CMD_ARGV[0],
3304 * handle breakpoints, not debugging */
3305 target_addr_t addr = 0;
3306 if (CMD_ARGC == 1) {
3307 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3308 current = 0;
3311 return target_resume(target, current, addr, 1, 0);
3314 COMMAND_HANDLER(handle_step_command)
3316 if (CMD_ARGC > 1)
3317 return ERROR_COMMAND_SYNTAX_ERROR;
3319 LOG_DEBUG("-");
3321 /* with no CMD_ARGV, step from current pc, addr = 0,
3322 * with one argument addr = CMD_ARGV[0],
3323 * handle breakpoints, debugging */
3324 target_addr_t addr = 0;
3325 int current_pc = 1;
3326 if (CMD_ARGC == 1) {
3327 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3328 current_pc = 0;
3331 struct target *target = get_current_target(CMD_CTX);
3333 return target_step(target, current_pc, addr, 1);
3336 void target_handle_md_output(struct command_invocation *cmd,
3337 struct target *target, target_addr_t address, unsigned size,
3338 unsigned count, const uint8_t *buffer)
3340 const unsigned line_bytecnt = 32;
3341 unsigned line_modulo = line_bytecnt / size;
3343 char output[line_bytecnt * 4 + 1];
3344 unsigned output_len = 0;
3346 const char *value_fmt;
3347 switch (size) {
3348 case 8:
3349 value_fmt = "%16.16"PRIx64" ";
3350 break;
3351 case 4:
3352 value_fmt = "%8.8"PRIx64" ";
3353 break;
3354 case 2:
3355 value_fmt = "%4.4"PRIx64" ";
3356 break;
3357 case 1:
3358 value_fmt = "%2.2"PRIx64" ";
3359 break;
3360 default:
3361 /* "can't happen", caller checked */
3362 LOG_ERROR("invalid memory read size: %u", size);
3363 return;
3366 for (unsigned i = 0; i < count; i++) {
3367 if (i % line_modulo == 0) {
3368 output_len += snprintf(output + output_len,
3369 sizeof(output) - output_len,
3370 TARGET_ADDR_FMT ": ",
3371 (address + (i * size)));
3374 uint64_t value = 0;
3375 const uint8_t *value_ptr = buffer + i * size;
3376 switch (size) {
3377 case 8:
3378 value = target_buffer_get_u64(target, value_ptr);
3379 break;
3380 case 4:
3381 value = target_buffer_get_u32(target, value_ptr);
3382 break;
3383 case 2:
3384 value = target_buffer_get_u16(target, value_ptr);
3385 break;
3386 case 1:
3387 value = *value_ptr;
3389 output_len += snprintf(output + output_len,
3390 sizeof(output) - output_len,
3391 value_fmt, value);
3393 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3394 command_print(cmd, "%s", output);
3395 output_len = 0;
3400 COMMAND_HANDLER(handle_md_command)
3402 if (CMD_ARGC < 1)
3403 return ERROR_COMMAND_SYNTAX_ERROR;
3405 unsigned size = 0;
3406 switch (CMD_NAME[2]) {
3407 case 'd':
3408 size = 8;
3409 break;
3410 case 'w':
3411 size = 4;
3412 break;
3413 case 'h':
3414 size = 2;
3415 break;
3416 case 'b':
3417 size = 1;
3418 break;
3419 default:
3420 return ERROR_COMMAND_SYNTAX_ERROR;
3423 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3424 int (*fn)(struct target *target,
3425 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3426 if (physical) {
3427 CMD_ARGC--;
3428 CMD_ARGV++;
3429 fn = target_read_phys_memory;
3430 } else
3431 fn = target_read_memory;
3432 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3433 return ERROR_COMMAND_SYNTAX_ERROR;
3435 target_addr_t address;
3436 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3438 unsigned count = 1;
3439 if (CMD_ARGC == 2)
3440 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3442 uint8_t *buffer = calloc(count, size);
3443 if (!buffer) {
3444 LOG_ERROR("Failed to allocate md read buffer");
3445 return ERROR_FAIL;
3448 struct target *target = get_current_target(CMD_CTX);
3449 int retval = fn(target, address, size, count, buffer);
3450 if (retval == ERROR_OK)
3451 target_handle_md_output(CMD, target, address, size, count, buffer);
3453 free(buffer);
3455 return retval;
3458 typedef int (*target_write_fn)(struct target *target,
3459 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3461 static int target_fill_mem(struct target *target,
3462 target_addr_t address,
3463 target_write_fn fn,
3464 unsigned data_size,
3465 /* value */
3466 uint64_t b,
3467 /* count */
3468 unsigned c)
3470 /* We have to write in reasonably large chunks to be able
3471 * to fill large memory areas with any sane speed */
3472 const unsigned chunk_size = 16384;
3473 uint8_t *target_buf = malloc(chunk_size * data_size);
3474 if (!target_buf) {
3475 LOG_ERROR("Out of memory");
3476 return ERROR_FAIL;
3479 for (unsigned i = 0; i < chunk_size; i++) {
3480 switch (data_size) {
3481 case 8:
3482 target_buffer_set_u64(target, target_buf + i * data_size, b);
3483 break;
3484 case 4:
3485 target_buffer_set_u32(target, target_buf + i * data_size, b);
3486 break;
3487 case 2:
3488 target_buffer_set_u16(target, target_buf + i * data_size, b);
3489 break;
3490 case 1:
3491 target_buffer_set_u8(target, target_buf + i * data_size, b);
3492 break;
3493 default:
3494 exit(-1);
3498 int retval = ERROR_OK;
3500 for (unsigned x = 0; x < c; x += chunk_size) {
3501 unsigned current;
3502 current = c - x;
3503 if (current > chunk_size)
3504 current = chunk_size;
3505 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3506 if (retval != ERROR_OK)
3507 break;
3508 /* avoid GDB timeouts */
3509 keep_alive();
3511 free(target_buf);
3513 return retval;
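/* Illustrative sketch, not part of the build: handle_mw_command below wraps
 * this helper; a direct call filling 0x1000 words with a pattern would look
 * like this (address and pattern are hypothetical).
 *
 *   retval = target_fill_mem(target, 0x20000000, target_write_memory,
 *                            4, 0xdeadbeef, 0x1000);
 */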
3517 COMMAND_HANDLER(handle_mw_command)
3519 if (CMD_ARGC < 2)
3520 return ERROR_COMMAND_SYNTAX_ERROR;
3521 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3522 target_write_fn fn;
3523 if (physical) {
3524 CMD_ARGC--;
3525 CMD_ARGV++;
3526 fn = target_write_phys_memory;
3527 } else
3528 fn = target_write_memory;
3529 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3530 return ERROR_COMMAND_SYNTAX_ERROR;
3532 target_addr_t address;
3533 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3535 uint64_t value;
3536 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3538 unsigned count = 1;
3539 if (CMD_ARGC == 3)
3540 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3542 struct target *target = get_current_target(CMD_CTX);
3543 unsigned wordsize;
3544 switch (CMD_NAME[2]) {
3545 case 'd':
3546 wordsize = 8;
3547 break;
3548 case 'w':
3549 wordsize = 4;
3550 break;
3551 case 'h':
3552 wordsize = 2;
3553 break;
3554 case 'b':
3555 wordsize = 1;
3556 break;
3557 default:
3558 return ERROR_COMMAND_SYNTAX_ERROR;
3561 return target_fill_mem(target, address, fn, wordsize, value, count);
3564 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3565 target_addr_t *min_address, target_addr_t *max_address)
3567 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3568 return ERROR_COMMAND_SYNTAX_ERROR;
3570 /* a base address isn't always necessary,
3571 * default to 0x0 (i.e. don't relocate) */
3572 if (CMD_ARGC >= 2) {
3573 target_addr_t addr;
3574 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3575 image->base_address = addr;
3576 image->base_address_set = true;
3577 } else
3578 image->base_address_set = false;
3580 image->start_address_set = false;
3582 if (CMD_ARGC >= 4)
3583 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3584 if (CMD_ARGC == 5) {
3585 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3586 /* use size (given) to find max (required) */
3587 *max_address += *min_address;
3590 if (*min_address > *max_address)
3591 return ERROR_COMMAND_SYNTAX_ERROR;
3593 return ERROR_OK;
3596 COMMAND_HANDLER(handle_load_image_command)
3598 uint8_t *buffer;
3599 size_t buf_cnt;
3600 uint32_t image_size;
3601 target_addr_t min_address = 0;
3602 target_addr_t max_address = -1;
3603 struct image image;
3605 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3606 &image, &min_address, &max_address);
3607 if (retval != ERROR_OK)
3608 return retval;
3610 struct target *target = get_current_target(CMD_CTX);
3612 struct duration bench;
3613 duration_start(&bench);
3615 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3616 return ERROR_FAIL;
3618 image_size = 0x0;
3619 retval = ERROR_OK;
3620 for (unsigned int i = 0; i < image.num_sections; i++) {
3621 buffer = malloc(image.sections[i].size);
3622 if (!buffer) {
3623 command_print(CMD,
3624 "error allocating buffer for section (%d bytes)",
3625 (int)(image.sections[i].size));
3626 retval = ERROR_FAIL;
3627 break;
3630 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3631 if (retval != ERROR_OK) {
3632 free(buffer);
3633 break;
3636 uint32_t offset = 0;
3637 uint32_t length = buf_cnt;
3639 /* DANGER!!! beware of unsigned comparison here!!! */
3641 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3642 (image.sections[i].base_address < max_address)) {
3644 if (image.sections[i].base_address < min_address) {
3645 /* clip addresses below */
3646 offset += min_address-image.sections[i].base_address;
3647 length -= offset;
3650 if (image.sections[i].base_address + buf_cnt > max_address)
3651 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3653 retval = target_write_buffer(target,
3654 image.sections[i].base_address + offset, length, buffer + offset);
3655 if (retval != ERROR_OK) {
3656 free(buffer);
3657 break;
3659 image_size += length;
3660 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3661 (unsigned int)length,
3662 image.sections[i].base_address + offset);
3665 free(buffer);
3668 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3669 command_print(CMD, "downloaded %" PRIu32 " bytes "
3670 "in %fs (%0.3f KiB/s)", image_size,
3671 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3674 image_close(&image);
3676 return retval;
3680 COMMAND_HANDLER(handle_dump_image_command)
3682 struct fileio *fileio;
3683 uint8_t *buffer;
3684 int retval, retvaltemp;
3685 target_addr_t address, size;
3686 struct duration bench;
3687 struct target *target = get_current_target(CMD_CTX);
3689 if (CMD_ARGC != 3)
3690 return ERROR_COMMAND_SYNTAX_ERROR;
3692 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3693 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3695 uint32_t buf_size = (size > 4096) ? 4096 : size;
3696 buffer = malloc(buf_size);
3697 if (!buffer)
3698 return ERROR_FAIL;
3700 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3701 if (retval != ERROR_OK) {
3702 free(buffer);
3703 return retval;
3706 duration_start(&bench);
3708 while (size > 0) {
3709 size_t size_written;
3710 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3711 retval = target_read_buffer(target, address, this_run_size, buffer);
3712 if (retval != ERROR_OK)
3713 break;
3715 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3716 if (retval != ERROR_OK)
3717 break;
3719 size -= this_run_size;
3720 address += this_run_size;
3723 free(buffer);
3725 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3726 size_t filesize;
3727 retval = fileio_size(fileio, &filesize);
3728 if (retval != ERROR_OK)
3729 return retval;
3730 command_print(CMD,
3731 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3732 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3735 retvaltemp = fileio_close(fileio);
3736 if (retvaltemp != ERROR_OK)
3737 return retvaltemp;
3739 return retval;
3742 enum verify_mode {
3743 IMAGE_TEST = 0,
3744 IMAGE_VERIFY = 1,
3745 IMAGE_CHECKSUM_ONLY = 2
3748 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3750 uint8_t *buffer;
3751 size_t buf_cnt;
3752 uint32_t image_size;
3753 int retval;
3754 uint32_t checksum = 0;
3755 uint32_t mem_checksum = 0;
3757 struct image image;
3759 struct target *target = get_current_target(CMD_CTX);
3761 if (CMD_ARGC < 1)
3762 return ERROR_COMMAND_SYNTAX_ERROR;
3764 if (!target) {
3765 LOG_ERROR("no target selected");
3766 return ERROR_FAIL;
3769 struct duration bench;
3770 duration_start(&bench);
3772 if (CMD_ARGC >= 2) {
3773 target_addr_t addr;
3774 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3775 image.base_address = addr;
3776 image.base_address_set = true;
3777 } else {
3778 image.base_address_set = false;
3779 image.base_address = 0x0;
3782 image.start_address_set = false;
3784 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3785 if (retval != ERROR_OK)
3786 return retval;
3788 image_size = 0x0;
3789 int diffs = 0;
3790 retval = ERROR_OK;
3791 for (unsigned int i = 0; i < image.num_sections; i++) {
3792 buffer = malloc(image.sections[i].size);
3793 if (!buffer) {
3794 command_print(CMD,
3795 "error allocating buffer for section (%" PRIu32 " bytes)",
3796 image.sections[i].size);
3797 break;
3799 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3800 if (retval != ERROR_OK) {
3801 free(buffer);
3802 break;
3805 if (verify >= IMAGE_VERIFY) {
3806 /* calculate checksum of image */
3807 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3808 if (retval != ERROR_OK) {
3809 free(buffer);
3810 break;
3813 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3814 if (retval != ERROR_OK) {
3815 free(buffer);
3816 break;
3818 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3819 LOG_ERROR("checksum mismatch");
3820 free(buffer);
3821 retval = ERROR_FAIL;
3822 goto done;
3824 if (checksum != mem_checksum) {
3825 /* failed crc checksum, fall back to a binary compare */
3826 uint8_t *data;
3828 if (diffs == 0)
3829 LOG_ERROR("checksum mismatch - attempting binary compare");
3831 data = malloc(buf_cnt);
3833 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3834 if (retval == ERROR_OK) {
3835 uint32_t t;
3836 for (t = 0; t < buf_cnt; t++) {
3837 if (data[t] != buffer[t]) {
3838 command_print(CMD,
3839 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3840 diffs,
3841 (unsigned)(t + image.sections[i].base_address),
3842 data[t],
3843 buffer[t]);
3844 if (diffs++ >= 127) {
3845 command_print(CMD, "More than 128 errors, the rest are not printed.");
3846 free(data);
3847 free(buffer);
3848 goto done;
3851 keep_alive();
3854 free(data);
3856 } else {
3857 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3858 image.sections[i].base_address,
3859 buf_cnt);
3862 free(buffer);
3863 image_size += buf_cnt;
3865 if (diffs > 0)
3866 command_print(CMD, "No more differences found.");
3867 done:
3868 if (diffs > 0)
3869 retval = ERROR_FAIL;
3870 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3871 command_print(CMD, "verified %" PRIu32 " bytes "
3872 "in %fs (%0.3f KiB/s)", image_size,
3873 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3876 image_close(&image);
3878 return retval;
3881 COMMAND_HANDLER(handle_verify_image_checksum_command)
3883 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3886 COMMAND_HANDLER(handle_verify_image_command)
3888 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3891 COMMAND_HANDLER(handle_test_image_command)
3893 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3896 static int handle_bp_command_list(struct command_invocation *cmd)
3898 struct target *target = get_current_target(cmd->ctx);
3899 struct breakpoint *breakpoint = target->breakpoints;
3900 while (breakpoint) {
3901 if (breakpoint->type == BKPT_SOFT) {
3902 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3903 breakpoint->length);
3904 command_print(cmd, "Software breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, orig_instr=0x%s",
3905 breakpoint->address,
3906 breakpoint->length,
3907 buf);
3908 free(buf);
3909 } else {
3910 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3911 command_print(cmd, "Context breakpoint: asid=0x%8.8" PRIx32 ", len=0x%x, num=%u",
3912 breakpoint->asid,
3913 breakpoint->length, breakpoint->number);
3914 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3915 command_print(cmd, "Hybrid breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3916 breakpoint->address,
3917 breakpoint->length, breakpoint->number);
3918 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3919 breakpoint->asid);
3920 } else
3921 command_print(cmd, "Hardware breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3922 breakpoint->address,
3923 breakpoint->length, breakpoint->number);
3926 breakpoint = breakpoint->next;
3928 return ERROR_OK;
3931 static int handle_bp_command_set(struct command_invocation *cmd,
3932 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3934 struct target *target = get_current_target(cmd->ctx);
3935 int retval;
3937 if (asid == 0) {
3938 retval = breakpoint_add(target, addr, length, hw);
3939 /* error is always logged in breakpoint_add(), do not print it again */
3940 if (retval == ERROR_OK)
3941 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3943 } else if (addr == 0) {
3944 if (!target->type->add_context_breakpoint) {
3945 LOG_TARGET_ERROR(target, "Context breakpoint not available");
3946 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3948 retval = context_breakpoint_add(target, asid, length, hw);
3949 /* error is always logged in context_breakpoint_add(), do not print it again */
3950 if (retval == ERROR_OK)
3951 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3953 } else {
3954 if (!target->type->add_hybrid_breakpoint) {
3955 LOG_TARGET_ERROR(target, "Hybrid breakpoint not available");
3956 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3958 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3959 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3960 if (retval == ERROR_OK)
3961 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3963 return retval;
3966 COMMAND_HANDLER(handle_bp_command)
3968 target_addr_t addr;
3969 uint32_t asid;
3970 uint32_t length;
3971 int hw = BKPT_SOFT;
3973 switch (CMD_ARGC) {
3974 case 0:
3975 return handle_bp_command_list(CMD);
3977 case 2:
3978 asid = 0;
3979 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3980 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3981 return handle_bp_command_set(CMD, addr, asid, length, hw);
3983 case 3:
3984 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3985 hw = BKPT_HARD;
3986 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3987 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3988 asid = 0;
3989 return handle_bp_command_set(CMD, addr, asid, length, hw);
3990 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3991 hw = BKPT_HARD;
3992 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3993 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3994 addr = 0;
3995 return handle_bp_command_set(CMD, addr, asid, length, hw);
3997 /* fallthrough */
3998 case 4:
3999 hw = BKPT_HARD;
4000 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4001 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4002 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4003 return handle_bp_command_set(CMD, addr, asid, length, hw);
4005 default:
4006 return ERROR_COMMAND_SYNTAX_ERROR;
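/*
 * Illustrative 'bp' invocations accepted by the argument parsing above
 * (addresses, lengths and ASIDs are made-up values, assuming a configured
 * target):
 *
 *   bp 0x08000400 2            ;# software breakpoint, 2 byte length
 *   bp 0x08000400 2 hw         ;# hardware breakpoint
 *   bp 0x22 4 hw_ctx           ;# context breakpoint on ASID 0x22
 *   bp 0x08000400 0x22 4 hw    ;# hybrid breakpoint (address + ASID)
 */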
4010 COMMAND_HANDLER(handle_rbp_command)
4012 int retval;
4014 if (CMD_ARGC != 1)
4015 return ERROR_COMMAND_SYNTAX_ERROR;
4017 struct target *target = get_current_target(CMD_CTX);
4019 if (!strcmp(CMD_ARGV[0], "all")) {
4020 retval = breakpoint_remove_all(target);
4022 if (retval != ERROR_OK) {
4023 command_print(CMD, "Error encountered during removal of all breakpoints.");
4024 command_print(CMD, "Some breakpoints may have remained set.");
4026 } else {
4027 target_addr_t addr;
4028 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4030 retval = breakpoint_remove(target, addr);
4032 if (retval != ERROR_OK)
4033 command_print(CMD, "Error during removal of breakpoint at address " TARGET_ADDR_FMT, addr);
4036 return retval;
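/*
 * Illustrative 'rbp' usage matching the parsing above:
 *
 *   rbp 0x08000400             ;# remove the breakpoint at this address
 *   rbp all                    ;# remove every breakpoint on the target
 */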
4039 COMMAND_HANDLER(handle_wp_command)
4041 struct target *target = get_current_target(CMD_CTX);
4043 if (CMD_ARGC == 0) {
4044 struct watchpoint *watchpoint = target->watchpoints;
4046 while (watchpoint) {
4047 char wp_type = (watchpoint->rw == WPT_READ ? 'r' : (watchpoint->rw == WPT_WRITE ? 'w' : 'a'));
4048 command_print(CMD, "address: " TARGET_ADDR_FMT
4049 ", len: 0x%8.8" PRIx32
4050 ", r/w/a: %c, value: 0x%8.8" PRIx64
4051 ", mask: 0x%8.8" PRIx64,
4052 watchpoint->address,
4053 watchpoint->length,
4054 wp_type,
4055 watchpoint->value,
4056 watchpoint->mask);
4057 watchpoint = watchpoint->next;
4059 return ERROR_OK;
4062 enum watchpoint_rw type = WPT_ACCESS;
4063 target_addr_t addr = 0;
4064 uint32_t length = 0;
4065 uint64_t data_value = 0x0;
4066 uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
4067 bool mask_specified = false;
4069 switch (CMD_ARGC) {
4070 case 5:
4071 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
4072 mask_specified = true;
4073 /* fall through */
4074 case 4:
4075 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
4076 // If the user specified only a data value without a mask, the mask should be 0
4077 if (!mask_specified)
4078 data_mask = 0;
4079 /* fall through */
4080 case 3:
4081 switch (CMD_ARGV[2][0]) {
4082 case 'r':
4083 type = WPT_READ;
4084 break;
4085 case 'w':
4086 type = WPT_WRITE;
4087 break;
4088 case 'a':
4089 type = WPT_ACCESS;
4090 break;
4091 default:
4092 LOG_TARGET_ERROR(target, "invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4093 return ERROR_COMMAND_SYNTAX_ERROR;
4095 /* fall through */
4096 case 2:
4097 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4098 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4099 break;
4101 default:
4102 return ERROR_COMMAND_SYNTAX_ERROR;
4105 int retval = watchpoint_add(target, addr, length, type,
4106 data_value, data_mask);
4107 if (retval != ERROR_OK)
4108 LOG_TARGET_ERROR(target, "Failure setting watchpoints");
4110 return retval;
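/*
 * Illustrative 'wp' invocations matching the switch above (all values are
 * made up):
 *
 *   wp                               ;# list watchpoints
 *   wp 0x20000000 4                  ;# access watchpoint, 4 bytes
 *   wp 0x20000000 4 w                ;# write watchpoint
 *   wp 0x20000000 4 r 0x55           ;# read watchpoint with data value 0x55
 *   wp 0x20000000 4 a 0x55 0xff      ;# data value 0x55 with explicit mask 0xff
 */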
4113 COMMAND_HANDLER(handle_rwp_command)
4115 int retval;
4117 if (CMD_ARGC != 1)
4118 return ERROR_COMMAND_SYNTAX_ERROR;
4120 struct target *target = get_current_target(CMD_CTX);
4121 if (!strcmp(CMD_ARGV[0], "all")) {
4122 retval = watchpoint_remove_all(target);
4124 if (retval != ERROR_OK) {
4125 command_print(CMD, "Error encountered during removal of all watchpoints.");
4126 command_print(CMD, "Some watchpoints may have remained set.");
4128 } else {
4129 target_addr_t addr;
4130 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4132 retval = watchpoint_remove(target, addr);
4134 if (retval != ERROR_OK)
4135 command_print(CMD, "Error during removal of watchpoint at address " TARGET_ADDR_FMT, addr);
4138 return retval;
4142 * Translate a virtual address to a physical address.
4144  * The low-level target implementation must have logged a detailed error,
4145  * which is forwarded to the telnet/GDB session.
4147 COMMAND_HANDLER(handle_virt2phys_command)
4149 if (CMD_ARGC != 1)
4150 return ERROR_COMMAND_SYNTAX_ERROR;
4152 target_addr_t va;
4153 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4154 target_addr_t pa;
4156 struct target *target = get_current_target(CMD_CTX);
4157 int retval = target->type->virt2phys(target, va, &pa);
4158 if (retval == ERROR_OK)
4159 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4161 return retval;
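/*
 * Illustrative: `virt2phys 0xc0001000` asks the target's virt2phys backend for
 * the mapping and prints "Physical address 0x..." on success (the address here
 * is a made-up example).
 */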
4164 static void write_data(FILE *f, const void *data, size_t len)
4166 size_t written = fwrite(data, 1, len, f);
4167 if (written != len)
4168 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4171 static void write_long(FILE *f, int l, struct target *target)
4173 uint8_t val[4];
4175 target_buffer_set_u32(target, val, l);
4176 write_data(f, val, 4);
4179 static void write_string(FILE *f, char *s)
4181 write_data(f, s, strlen(s));
4184 typedef unsigned char UNIT[2]; /* unit of profiling */
4186 /* Dump a gmon.out histogram file. */
4187 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4188 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4190 uint32_t i;
4191 FILE *f = fopen(filename, "w");
4192 if (!f)
4193 return;
4194 write_string(f, "gmon");
4195 write_long(f, 0x00000001, target); /* Version */
4196 write_long(f, 0, target); /* padding */
4197 write_long(f, 0, target); /* padding */
4198 write_long(f, 0, target); /* padding */
4200 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4201 write_data(f, &zero, 1);
4203 /* figure out bucket size */
4204 uint32_t min;
4205 uint32_t max;
4206 if (with_range) {
4207 min = start_address;
4208 max = end_address;
4209 } else {
4210 min = samples[0];
4211 max = samples[0];
4212 for (i = 0; i < sample_num; i++) {
4213 if (min > samples[i])
4214 min = samples[i];
4215 if (max < samples[i])
4216 max = samples[i];
4219 /* max should be (largest sample + 1)
4220 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4221 if (max < UINT32_MAX)
4222 max++;
4224 /* gprof requires (max - min) >= 2 */
4225 while ((max - min) < 2) {
4226 if (max < UINT32_MAX)
4227 max++;
4228 else
4229 min--;
4233 uint32_t address_space = max - min;
4235 /* FIXME: What is the reasonable number of buckets?
4236 * The profiling result will be more accurate if there are enough buckets. */
4237 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4238 uint32_t num_buckets = address_space / sizeof(UNIT);
4239 if (num_buckets > max_buckets)
4240 num_buckets = max_buckets;
4241 int *buckets = malloc(sizeof(int) * num_buckets);
4242 if (!buckets) {
4243 fclose(f);
4244 return;
4246 memset(buckets, 0, sizeof(int) * num_buckets);
4247 for (i = 0; i < sample_num; i++) {
4248 uint32_t address = samples[i];
4250 if ((address < min) || (max <= address))
4251 continue;
4253 long long a = address - min;
4254 long long b = num_buckets;
4255 long long c = address_space;
4256 int index_t = (a * b) / c; /* 64-bit intermediates avoid int32 overflow */
4257 buckets[index_t]++;
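/*
 * Worked example of the scaling above (illustrative numbers): with
 * min = 0x1000, max = 0x2000 (address_space = 0x1000) and num_buckets = 128,
 * a sample at 0x1800 lands in bucket (0x800 * 128) / 0x1000 = 64.
 */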
4260 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4261 write_long(f, min, target); /* low_pc */
4262 write_long(f, max, target); /* high_pc */
4263 write_long(f, num_buckets, target); /* # of buckets */
4264 float sample_rate = sample_num / (duration_ms / 1000.0);
4265 write_long(f, sample_rate, target);
4266 write_string(f, "seconds");
4267 for (i = 0; i < (15-strlen("seconds")); i++)
4268 write_data(f, &zero, 1);
4269 write_string(f, "s");
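/*
 * For orientation, the histogram header emitted above is (each write_long()
 * value in target byte order):
 *
 *   low_pc        = min
 *   high_pc       = max
 *   hist_size     = num_buckets
 *   prof_rate     = samples per second
 *   dimen[15]     = "seconds", zero padded
 *   dimen_abbrev  = 's'
 *
 * This loosely mirrors struct gmon_hist_hdr from gprof's gmon_out.h (which
 * sizes low_pc/high_pc as target pointers); the names here are descriptive
 * only and are not definitions used by this file.
 */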
4271 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4273 char *data = malloc(2 * num_buckets);
4274 if (data) {
4275 for (i = 0; i < num_buckets; i++) {
4276 int val;
4277 val = buckets[i];
4278 if (val > 65535)
4279 val = 65535;
4280 data[i * 2] = val&0xff;
4281 data[i * 2 + 1] = (val >> 8) & 0xff;
4283 free(buckets);
4284 write_data(f, data, num_buckets * 2);
4285 free(data);
4286 } else
4287 free(buckets);
4289 fclose(f);
4292 /* Profiling samples the CPU's program counter as quickly as OpenOCD can,
4293  * which serves as a statistical (approximately random) sampling of the PC. */
4294 COMMAND_HANDLER(handle_profile_command)
4296 struct target *target = get_current_target(CMD_CTX);
4298 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4299 return ERROR_COMMAND_SYNTAX_ERROR;
4301 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4302 uint32_t offset;
4303 uint32_t num_of_samples;
4304 int retval = ERROR_OK;
4305 bool halted_before_profiling = target->state == TARGET_HALTED;
4307 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4309 uint32_t start_address = 0;
4310 uint32_t end_address = 0;
4311 bool with_range = false;
4312 if (CMD_ARGC == 4) {
4313 with_range = true;
4314 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4315 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4316 if (start_address > end_address || (end_address - start_address) < 2) {
4317 command_print(CMD, "Error: end - start < 2");
4318 return ERROR_COMMAND_ARGUMENT_INVALID;
4322 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4323 if (!samples) {
4324 LOG_ERROR("No memory to store samples.");
4325 return ERROR_FAIL;
4328 uint64_t timestart_ms = timeval_ms();
4330 * Some cores let us sample the PC without the
4331 * annoying halt/resume step; for example, ARMv7 PCSR.
4332 * Provide a way to use that more efficient mechanism.
4334 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4335 &num_of_samples, offset);
4336 if (retval != ERROR_OK) {
4337 free(samples);
4338 return retval;
4340 uint32_t duration_ms = timeval_ms() - timestart_ms;
4342 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4344 retval = target_poll(target);
4345 if (retval != ERROR_OK) {
4346 free(samples);
4347 return retval;
4350 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4351 /* The target was halted before we started and is running now. Halt it,
4352 * for consistency. */
4353 retval = target_halt(target);
4354 if (retval != ERROR_OK) {
4355 free(samples);
4356 return retval;
4358 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4359 /* The target was running before we started and is halted now. Resume
4360 * it, for consistency. */
4361 retval = target_resume(target, 1, 0, 0, 0);
4362 if (retval != ERROR_OK) {
4363 free(samples);
4364 return retval;
4368 retval = target_poll(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4374 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4375 with_range, start_address, end_address, target, duration_ms);
4376 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4378 free(samples);
4379 return retval;
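/*
 * Illustrative 'profile' usage for the handler above: the first argument is
 * the sampling-time value handed to target_profiling() (stored in the local
 * variable named 'offset'), the second the gmon.out file to write, and the
 * optional pair restricts the histogram to an address range (values below
 * are made up):
 *
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08010000
 */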
4382 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4384 char *namebuf;
4385 Jim_Obj *obj_name, *obj_val;
4386 int result;
4388 namebuf = alloc_printf("%s(%d)", varname, idx);
4389 if (!namebuf)
4390 return JIM_ERR;
4392 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4393 jim_wide wide_val = val;
4394 obj_val = Jim_NewWideObj(interp, wide_val);
4395 if (!obj_name || !obj_val) {
4396 free(namebuf);
4397 return JIM_ERR;
4400 Jim_IncrRefCount(obj_name);
4401 Jim_IncrRefCount(obj_val);
4402 result = Jim_SetVariable(interp, obj_name, obj_val);
4403 Jim_DecrRefCount(interp, obj_name);
4404 Jim_DecrRefCount(interp, obj_val);
4405 free(namebuf);
4406 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4407 return result;
4410 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4412 int e;
4414 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4416 /* argv[0] = name of array to receive the data
4417 * argv[1] = desired element width in bits
4418 * argv[2] = memory address
4419 * argv[3] = count of times to read
4420 * argv[4] = optional "phys"
4422 if (argc < 4 || argc > 5) {
4423 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4424 return JIM_ERR;
4427 /* Arg 0: Name of the array variable */
4428 const char *varname = Jim_GetString(argv[0], NULL);
4430 /* Arg 1: Bit width of one element */
4431 long l;
4432 e = Jim_GetLong(interp, argv[1], &l);
4433 if (e != JIM_OK)
4434 return e;
4435 const unsigned int width_bits = l;
4437 if (width_bits != 8 &&
4438 width_bits != 16 &&
4439 width_bits != 32 &&
4440 width_bits != 64) {
4441 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4442 Jim_AppendStrings(interp, Jim_GetResult(interp),
4443 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4444 return JIM_ERR;
4446 const unsigned int width = width_bits / 8;
4448 /* Arg 2: Memory address */
4449 jim_wide wide_addr;
4450 e = Jim_GetWide(interp, argv[2], &wide_addr);
4451 if (e != JIM_OK)
4452 return e;
4453 target_addr_t addr = (target_addr_t)wide_addr;
4455 /* Arg 3: Number of elements to read */
4456 e = Jim_GetLong(interp, argv[3], &l);
4457 if (e != JIM_OK)
4458 return e;
4459 size_t len = l;
4461 /* Arg 4: phys */
4462 bool is_phys = false;
4463 if (argc > 4) {
4464 int str_len = 0;
4465 const char *phys = Jim_GetString(argv[4], &str_len);
4466 if (!strncmp(phys, "phys", str_len))
4467 is_phys = true;
4468 else
4469 return JIM_ERR;
4472 /* Argument checks */
4473 if (len == 0) {
4474 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4475 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count?", NULL);
4476 return JIM_ERR;
4478 if ((addr + (len * width)) < addr) {
4479 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4480 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
4481 return JIM_ERR;
4483 if (len > 65536) {
4484 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4485 Jim_AppendStrings(interp, Jim_GetResult(interp),
4486 "mem2array: too large read request, exceeds 64K items", NULL);
4487 return JIM_ERR;
4490 if ((width == 1) ||
4491 ((width == 2) && ((addr & 1) == 0)) ||
4492 ((width == 4) && ((addr & 3) == 0)) ||
4493 ((width == 8) && ((addr & 7) == 0))) {
4494 /* alignment correct */
4495 } else {
4496 char buf[100];
4497 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4498 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4499 addr,
4500 width);
4501 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4502 return JIM_ERR;
4505 /* Transfer loop */
4507 /* index counter */
4508 size_t idx = 0;
4510 const size_t buffersize = 4096;
4511 uint8_t *buffer = malloc(buffersize);
4512 if (!buffer)
4513 return JIM_ERR;
4515 /* assume ok */
4516 e = JIM_OK;
4517 while (len) {
4518 /* Slurp... in buffer size chunks */
4519 const unsigned int max_chunk_len = buffersize / width;
4520 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4522 int retval;
4523 if (is_phys)
4524 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4525 else
4526 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4527 if (retval != ERROR_OK) {
4528 /* read failed */
4529 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4530 addr,
4531 width,
4532 chunk_len);
4533 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4534 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4535 e = JIM_ERR;
4536 break;
4537 } else {
4538 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4539 uint64_t v = 0;
4540 switch (width) {
4541 case 8:
4542 v = target_buffer_get_u64(target, &buffer[i*width]);
4543 break;
4544 case 4:
4545 v = target_buffer_get_u32(target, &buffer[i*width]);
4546 break;
4547 case 2:
4548 v = target_buffer_get_u16(target, &buffer[i*width]);
4549 break;
4550 case 1:
4551 v = buffer[i] & 0x0ff;
4552 break;
4554 new_u64_array_element(interp, varname, idx, v);
4556 len -= chunk_len;
4557 addr += chunk_len * width;
4561 free(buffer);
4563 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4565 return e;
4568 COMMAND_HANDLER(handle_target_read_memory)
4571 * CMD_ARGV[0] = memory address
4572 * CMD_ARGV[1] = desired element width in bits
4573 * CMD_ARGV[2] = number of elements to read
4574 * CMD_ARGV[3] = optional "phys"
4577 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4578 return ERROR_COMMAND_SYNTAX_ERROR;
4580 /* Arg 1: Memory address. */
4581 target_addr_t addr;
4582 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4584 /* Arg 2: Bit width of one element. */
4585 unsigned int width_bits;
4586 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4588 /* Arg 3: Number of elements to read. */
4589 unsigned int count;
4590 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4592 /* Arg 4: Optional 'phys'. */
4593 bool is_phys = false;
4594 if (CMD_ARGC == 4) {
4595 if (strcmp(CMD_ARGV[3], "phys")) {
4596 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4597 return ERROR_COMMAND_ARGUMENT_INVALID;
4600 is_phys = true;
4603 switch (width_bits) {
4604 case 8:
4605 case 16:
4606 case 32:
4607 case 64:
4608 break;
4609 default:
4610 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4611 return ERROR_COMMAND_ARGUMENT_INVALID;
4614 const unsigned int width = width_bits / 8;
4616 if ((addr + (count * width)) < addr) {
4617 command_print(CMD, "read_memory: addr + count wraps to zero");
4618 return ERROR_COMMAND_ARGUMENT_INVALID;
4621 if (count > 65536) {
4622 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4623 return ERROR_COMMAND_ARGUMENT_INVALID;
4626 struct target *target = get_current_target(CMD_CTX);
4628 const size_t buffersize = 4096;
4629 uint8_t *buffer = malloc(buffersize);
4631 if (!buffer) {
4632 LOG_ERROR("Failed to allocate memory");
4633 return ERROR_FAIL;
4636 char *separator = "";
4637 while (count > 0) {
4638 const unsigned int max_chunk_len = buffersize / width;
4639 const size_t chunk_len = MIN(count, max_chunk_len);
4641 int retval;
4643 if (is_phys)
4644 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4645 else
4646 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4648 if (retval != ERROR_OK) {
4649 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4650 addr, width_bits, chunk_len);
4652  * FIXME: we append the errmsg to the list of values already printed.
4653  * Add a way to flush and replace the old output, and LOG_DEBUG() the details.
4655 command_print(CMD, "read_memory: failed to read memory");
4656 free(buffer);
4657 return retval;
4660 for (size_t i = 0; i < chunk_len ; i++) {
4661 uint64_t v = 0;
4663 switch (width) {
4664 case 8:
4665 v = target_buffer_get_u64(target, &buffer[i * width]);
4666 break;
4667 case 4:
4668 v = target_buffer_get_u32(target, &buffer[i * width]);
4669 break;
4670 case 2:
4671 v = target_buffer_get_u16(target, &buffer[i * width]);
4672 break;
4673 case 1:
4674 v = buffer[i];
4675 break;
4678 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4679 separator = " ";
4682 count -= chunk_len;
4683 addr += chunk_len * width;
4686 free(buffer);
4688 return ERROR_OK;
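/*
 * Illustrative 'read_memory' usage matching the "address width count ['phys']"
 * form handled above; the output is one space-separated 0x-prefixed value per
 * element:
 *
 *   read_memory 0x20000000 32 4
 *   read_memory 0x20000000 8 16 phys
 */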
4691 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4693 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4694 if (!namebuf)
4695 return JIM_ERR;
4697 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4698 if (!obj_name) {
4699 free(namebuf);
4700 return JIM_ERR;
4703 Jim_IncrRefCount(obj_name);
4704 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4705 Jim_DecrRefCount(interp, obj_name);
4706 free(namebuf);
4707 if (!obj_val)
4708 return JIM_ERR;
4710 jim_wide wide_val;
4711 int result = Jim_GetWide(interp, obj_val, &wide_val);
4712 *val = wide_val;
4713 return result;
4716 static int target_array2mem(Jim_Interp *interp, struct target *target,
4717 int argc, Jim_Obj *const *argv)
4719 int e;
4721 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4723 /* argv[0] = name of array from which to read the data
4724 * argv[1] = desired element width in bits
4725 * argv[2] = memory address
4726 * argv[3] = number of elements to write
4727 * argv[4] = optional "phys"
4729 if (argc < 4 || argc > 5) {
4730 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4731 return JIM_ERR;
4734 /* Arg 0: Name of the array variable */
4735 const char *varname = Jim_GetString(argv[0], NULL);
4737 /* Arg 1: Bit width of one element */
4738 long l;
4739 e = Jim_GetLong(interp, argv[1], &l);
4740 if (e != JIM_OK)
4741 return e;
4742 const unsigned int width_bits = l;
4744 if (width_bits != 8 &&
4745 width_bits != 16 &&
4746 width_bits != 32 &&
4747 width_bits != 64) {
4748 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4749 Jim_AppendStrings(interp, Jim_GetResult(interp),
4750 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4751 return JIM_ERR;
4753 const unsigned int width = width_bits / 8;
4755 /* Arg 2: Memory address */
4756 jim_wide wide_addr;
4757 e = Jim_GetWide(interp, argv[2], &wide_addr);
4758 if (e != JIM_OK)
4759 return e;
4760 target_addr_t addr = (target_addr_t)wide_addr;
4762 /* Arg 3: Number of elements to write */
4763 e = Jim_GetLong(interp, argv[3], &l);
4764 if (e != JIM_OK)
4765 return e;
4766 size_t len = l;
4768 /* Arg 4: Phys */
4769 bool is_phys = false;
4770 if (argc > 4) {
4771 int str_len = 0;
4772 const char *phys = Jim_GetString(argv[4], &str_len);
4773 if (!strncmp(phys, "phys", str_len))
4774 is_phys = true;
4775 else
4776 return JIM_ERR;
4779 /* Argument checks */
4780 if (len == 0) {
4781 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4782 Jim_AppendStrings(interp, Jim_GetResult(interp),
4783 "array2mem: zero width read?", NULL);
4784 return JIM_ERR;
4787 if ((addr + (len * width)) < addr) {
4788 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4789 Jim_AppendStrings(interp, Jim_GetResult(interp),
4790 "array2mem: addr + len - wraps to zero?", NULL);
4791 return JIM_ERR;
4794 if (len > 65536) {
4795 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4796 Jim_AppendStrings(interp, Jim_GetResult(interp),
4797 "array2mem: too large memory write request, exceeds 64K items", NULL);
4798 return JIM_ERR;
4801 if ((width == 1) ||
4802 ((width == 2) && ((addr & 1) == 0)) ||
4803 ((width == 4) && ((addr & 3) == 0)) ||
4804 ((width == 8) && ((addr & 7) == 0))) {
4805 /* alignment correct */
4806 } else {
4807 char buf[100];
4808 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4809 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4810 addr,
4811 width);
4812 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4813 return JIM_ERR;
4816 /* Transfer loop */
4818 /* assume ok */
4819 e = JIM_OK;
4821 const size_t buffersize = 4096;
4822 uint8_t *buffer = malloc(buffersize);
4823 if (!buffer)
4824 return JIM_ERR;
4826 /* index counter */
4827 size_t idx = 0;
4829 while (len) {
4830 /* Slurp... in buffer size chunks */
4831 const unsigned int max_chunk_len = buffersize / width;
4833 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4835 /* Fill the buffer */
4836 for (size_t i = 0; i < chunk_len; i++, idx++) {
4837 uint64_t v = 0;
4838 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4839 free(buffer);
4840 return JIM_ERR;
4842 switch (width) {
4843 case 8:
4844 target_buffer_set_u64(target, &buffer[i * width], v);
4845 break;
4846 case 4:
4847 target_buffer_set_u32(target, &buffer[i * width], v);
4848 break;
4849 case 2:
4850 target_buffer_set_u16(target, &buffer[i * width], v);
4851 break;
4852 case 1:
4853 buffer[i] = v & 0x0ff;
4854 break;
4857 len -= chunk_len;
4859 /* Write the buffer to memory */
4860 int retval;
4861 if (is_phys)
4862 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4863 else
4864 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4865 if (retval != ERROR_OK) {
4866 /* write failed */
4867 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4868 addr,
4869 width,
4870 chunk_len);
4871 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4872 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4873 e = JIM_ERR;
4874 break;
4876 addr += chunk_len * width;
4879 free(buffer);
4881 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4883 return e;
4886 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4887 Jim_Obj * const *argv)
4890 * argv[1] = memory address
4891 * argv[2] = desired element width in bits
4892 * argv[3] = list of data to write
4893 * argv[4] = optional "phys"
4896 if (argc < 4 || argc > 5) {
4897 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4898 return JIM_ERR;
4901 /* Arg 1: Memory address. */
4902 int e;
4903 jim_wide wide_addr;
4904 e = Jim_GetWide(interp, argv[1], &wide_addr);
4906 if (e != JIM_OK)
4907 return e;
4909 target_addr_t addr = (target_addr_t)wide_addr;
4911 /* Arg 2: Bit width of one element. */
4912 long l;
4913 e = Jim_GetLong(interp, argv[2], &l);
4915 if (e != JIM_OK)
4916 return e;
4918 const unsigned int width_bits = l;
4919 size_t count = Jim_ListLength(interp, argv[3]);
4921 /* Arg 4: Optional 'phys'. */
4922 bool is_phys = false;
4924 if (argc > 4) {
4925 const char *phys = Jim_GetString(argv[4], NULL);
4927 if (strcmp(phys, "phys")) {
4928 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4929 return JIM_ERR;
4932 is_phys = true;
4935 switch (width_bits) {
4936 case 8:
4937 case 16:
4938 case 32:
4939 case 64:
4940 break;
4941 default:
4942 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4943 return JIM_ERR;
4946 const unsigned int width = width_bits / 8;
4948 if ((addr + (count * width)) < addr) {
4949 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4950 return JIM_ERR;
4953 if (count > 65536) {
4954 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4955 return JIM_ERR;
4958 struct command_context *cmd_ctx = current_command_context(interp);
4959 assert(cmd_ctx != NULL);
4960 struct target *target = get_current_target(cmd_ctx);
4962 const size_t buffersize = 4096;
4963 uint8_t *buffer = malloc(buffersize);
4965 if (!buffer) {
4966 LOG_ERROR("Failed to allocate memory");
4967 return JIM_ERR;
4970 size_t j = 0;
4972 while (count > 0) {
4973 const unsigned int max_chunk_len = buffersize / width;
4974 const size_t chunk_len = MIN(count, max_chunk_len);
4976 for (size_t i = 0; i < chunk_len; i++, j++) {
4977 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
4978 jim_wide element_wide;
4979 Jim_GetWide(interp, tmp, &element_wide);
4981 const uint64_t v = element_wide;
4983 switch (width) {
4984 case 8:
4985 target_buffer_set_u64(target, &buffer[i * width], v);
4986 break;
4987 case 4:
4988 target_buffer_set_u32(target, &buffer[i * width], v);
4989 break;
4990 case 2:
4991 target_buffer_set_u16(target, &buffer[i * width], v);
4992 break;
4993 case 1:
4994 buffer[i] = v & 0x0ff;
4995 break;
4999 count -= chunk_len;
5001 int retval;
5003 if (is_phys)
5004 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5005 else
5006 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5008 if (retval != ERROR_OK) {
5009 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5010 addr, width_bits, chunk_len);
5011 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5012 e = JIM_ERR;
5013 break;
5016 addr += chunk_len * width;
5019 free(buffer);
5021 return e;
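/*
 * Illustrative 'write_memory' usage for the handler above ("address width
 * data ['phys']"), where data is a Tcl list of element values:
 *
 *   write_memory 0x20000000 32 {0xdeadbeef 0x00c0ffee}
 *   write_memory 0x20000000 8 {1 2 3 4} phys
 */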
5024 /* FIX? should we propagate errors here rather than printing them
5025 * and continuing?
5027 void target_handle_event(struct target *target, enum target_event e)
5029 struct target_event_action *teap;
5030 int retval;
5032 for (teap = target->event_action; teap; teap = teap->next) {
5033 if (teap->event == e) {
5034 LOG_DEBUG("target: %s (%s) event: %d (%s) action: %s",
5035 target_name(target),
5036 target_type_name(target),
5038 target_event_name(e),
5039 Jim_GetString(teap->body, NULL));
5041 /* Override the current target with the target the event
5042  * is issued from (many scripts rely on this).
5043  * Restore the previous override as soon as
5044  * the handler has finished processing. */
5045 struct command_context *cmd_ctx = current_command_context(teap->interp);
5046 struct target *saved_target_override = cmd_ctx->current_target_override;
5047 cmd_ctx->current_target_override = target;
5049 retval = Jim_EvalObj(teap->interp, teap->body);
5051 cmd_ctx->current_target_override = saved_target_override;
5053 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5054 return;
5056 if (retval == JIM_RETURN)
5057 retval = teap->interp->returnCode;
5059 if (retval != JIM_OK) {
5060 Jim_MakeErrorMessage(teap->interp);
5061 LOG_USER("Error executing event %s on target %s:\n%s",
5062 target_event_name(e),
5063 target_name(target),
5064 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5065 /* clean both error code and stacktrace before return */
5066 Jim_Eval(teap->interp, "error \"\" \"\"");
5072 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5073 Jim_Obj * const *argv)
5075 bool force = false;
5077 if (argc == 3) {
5078 const char *option = Jim_GetString(argv[1], NULL);
5080 if (!strcmp(option, "-force")) {
5081 argc--;
5082 argv++;
5083 force = true;
5084 } else {
5085 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5086 return JIM_ERR;
5090 if (argc != 2) {
5091 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5092 return JIM_ERR;
5095 const int length = Jim_ListLength(interp, argv[1]);
5097 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5099 if (!result_dict)
5100 return JIM_ERR;
5102 struct command_context *cmd_ctx = current_command_context(interp);
5103 assert(cmd_ctx != NULL);
5104 const struct target *target = get_current_target(cmd_ctx);
5106 for (int i = 0; i < length; i++) {
5107 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5109 if (!elem)
5110 return JIM_ERR;
5112 const char *reg_name = Jim_String(elem);
5114 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5115 false);
5117 if (!reg || !reg->exist) {
5118 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5119 return JIM_ERR;
5122 if (force || !reg->valid) {
5123 int retval = reg->type->get(reg);
5125 if (retval != ERROR_OK) {
5126 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5127 reg_name);
5128 return JIM_ERR;
5132 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5134 if (!reg_value) {
5135 LOG_ERROR("Failed to allocate memory");
5136 return JIM_ERR;
5139 char *tmp = alloc_printf("0x%s", reg_value);
5141 free(reg_value);
5143 if (!tmp) {
5144 LOG_ERROR("Failed to allocate memory");
5145 return JIM_ERR;
5148 Jim_DictAddElement(interp, result_dict, elem,
5149 Jim_NewStringObj(interp, tmp, -1));
5151 free(tmp);
5154 Jim_SetResult(interp, result_dict);
5156 return JIM_OK;
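/*
 * Illustrative 'get_reg' usage for the handler above: it returns a dict
 * mapping each requested register name to its hex value, and '-force'
 * bypasses the cached value. Register names depend on the target:
 *
 *   get_reg {pc sp}
 *   get_reg -force {pc}
 */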
5159 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5160 Jim_Obj * const *argv)
5162 if (argc != 2) {
5163 Jim_WrongNumArgs(interp, 1, argv, "dict");
5164 return JIM_ERR;
5167 int tmp;
5168 #if JIM_VERSION >= 80
5169 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5171 if (!dict)
5172 return JIM_ERR;
5173 #else
5174 Jim_Obj **dict;
5175 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5177 if (ret != JIM_OK)
5178 return ret;
5179 #endif
5181 const unsigned int length = tmp;
5182 struct command_context *cmd_ctx = current_command_context(interp);
5183 assert(cmd_ctx);
5184 const struct target *target = get_current_target(cmd_ctx);
5186 for (unsigned int i = 0; i < length; i += 2) {
5187 const char *reg_name = Jim_String(dict[i]);
5188 const char *reg_value = Jim_String(dict[i + 1]);
5189 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5190 false);
5192 if (!reg || !reg->exist) {
5193 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5194 return JIM_ERR;
5197 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5199 if (!buf) {
5200 LOG_ERROR("Failed to allocate memory");
5201 return JIM_ERR;
5204 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5205 int retval = reg->type->set(reg, buf);
5206 free(buf);
5208 if (retval != ERROR_OK) {
5209 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5210 reg_value, reg_name);
5211 return JIM_ERR;
5215 return JIM_OK;
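/*
 * Illustrative 'set_reg' usage for the handler above: the single argument is
 * a dict of register-name/value pairs (names depend on the target):
 *
 *   set_reg {pc 0x08000400 r0 0}
 */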
5219 * Returns true only if the target has a handler for the specified event.
5221 bool target_has_event_action(struct target *target, enum target_event event)
5223 struct target_event_action *teap;
5225 for (teap = target->event_action; teap; teap = teap->next) {
5226 if (teap->event == event)
5227 return true;
5229 return false;
5232 enum target_cfg_param {
5233 TCFG_TYPE,
5234 TCFG_EVENT,
5235 TCFG_WORK_AREA_VIRT,
5236 TCFG_WORK_AREA_PHYS,
5237 TCFG_WORK_AREA_SIZE,
5238 TCFG_WORK_AREA_BACKUP,
5239 TCFG_ENDIAN,
5240 TCFG_COREID,
5241 TCFG_CHAIN_POSITION,
5242 TCFG_DBGBASE,
5243 TCFG_RTOS,
5244 TCFG_DEFER_EXAMINE,
5245 TCFG_GDB_PORT,
5246 TCFG_GDB_MAX_CONNECTIONS,
5249 static struct jim_nvp nvp_config_opts[] = {
5250 { .name = "-type", .value = TCFG_TYPE },
5251 { .name = "-event", .value = TCFG_EVENT },
5252 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5253 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5254 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5255 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5256 { .name = "-endian", .value = TCFG_ENDIAN },
5257 { .name = "-coreid", .value = TCFG_COREID },
5258 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5259 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5260 { .name = "-rtos", .value = TCFG_RTOS },
5261 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5262 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5263 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5264 { .name = NULL, .value = -1 }
5267 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5269 struct jim_nvp *n;
5270 Jim_Obj *o;
5271 jim_wide w;
5272 int e;
5274 /* parse config or cget options ... */
5275 while (goi->argc > 0) {
5276 Jim_SetEmptyResult(goi->interp);
5277 /* jim_getopt_debug(goi); */
5279 if (target->type->target_jim_configure) {
5280 /* target defines a configure function */
5281 /* target gets first dibs on parameters */
5282 e = (*(target->type->target_jim_configure))(target, goi);
5283 if (e == JIM_OK) {
5284 /* more? */
5285 continue;
5287 if (e == JIM_ERR) {
5288 /* An error */
5289 return e;
5291 /* otherwise we 'continue' below */
5293 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5294 if (e != JIM_OK) {
5295 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5296 return e;
5298 switch (n->value) {
5299 case TCFG_TYPE:
5300 /* not settable */
5301 if (goi->isconfigure) {
5302 Jim_SetResultFormatted(goi->interp,
5303 "not settable: %s", n->name);
5304 return JIM_ERR;
5305 } else {
5306 no_params:
5307 if (goi->argc != 0) {
5308 Jim_WrongNumArgs(goi->interp,
5309 goi->argc, goi->argv,
5310 "NO PARAMS");
5311 return JIM_ERR;
5314 Jim_SetResultString(goi->interp,
5315 target_type_name(target), -1);
5316 /* loop for more */
5317 break;
5318 case TCFG_EVENT:
5319 if (goi->argc == 0) {
5320 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5321 return JIM_ERR;
5324 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5325 if (e != JIM_OK) {
5326 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5327 return e;
5330 if (goi->isconfigure) {
5331 if (goi->argc != 1) {
5332 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5333 return JIM_ERR;
5335 } else {
5336 if (goi->argc != 0) {
5337 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5338 return JIM_ERR;
5343 struct target_event_action *teap;
5345 teap = target->event_action;
5346 /* replace existing? */
5347 while (teap) {
5348 if (teap->event == (enum target_event)n->value)
5349 break;
5350 teap = teap->next;
5353 if (goi->isconfigure) {
5354 /* START_DEPRECATED_TPIU */
5355 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5356 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5357 /* END_DEPRECATED_TPIU */
5359 bool replace = true;
5360 if (!teap) {
5361 /* create new */
5362 teap = calloc(1, sizeof(*teap));
5363 replace = false;
5365 teap->event = n->value;
5366 teap->interp = goi->interp;
5367 jim_getopt_obj(goi, &o);
5368 if (teap->body)
5369 Jim_DecrRefCount(teap->interp, teap->body);
5370 teap->body = Jim_DuplicateObj(goi->interp, o);
5372  * FIXME:
5373  * Tcl/Tk "bind" events support %-substitution in the event body
5374  * (e.g. %X and %Y). We could support something similar here,
5375  * for example %T for the target name, %N for the target number
5376  * and %E for the event name.
5381 Jim_IncrRefCount(teap->body);
5383 if (!replace) {
5384 /* add to head of event list */
5385 teap->next = target->event_action;
5386 target->event_action = teap;
5388 Jim_SetEmptyResult(goi->interp);
5389 } else {
5390 /* get */
5391 if (!teap)
5392 Jim_SetEmptyResult(goi->interp);
5393 else
5394 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5397 /* loop for more */
5398 break;
5400 case TCFG_WORK_AREA_VIRT:
5401 if (goi->isconfigure) {
5402 target_free_all_working_areas(target);
5403 e = jim_getopt_wide(goi, &w);
5404 if (e != JIM_OK)
5405 return e;
5406 target->working_area_virt = w;
5407 target->working_area_virt_spec = true;
5408 } else {
5409 if (goi->argc != 0)
5410 goto no_params;
5412 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5413 /* loop for more */
5414 break;
5416 case TCFG_WORK_AREA_PHYS:
5417 if (goi->isconfigure) {
5418 target_free_all_working_areas(target);
5419 e = jim_getopt_wide(goi, &w);
5420 if (e != JIM_OK)
5421 return e;
5422 target->working_area_phys = w;
5423 target->working_area_phys_spec = true;
5424 } else {
5425 if (goi->argc != 0)
5426 goto no_params;
5428 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5429 /* loop for more */
5430 break;
5432 case TCFG_WORK_AREA_SIZE:
5433 if (goi->isconfigure) {
5434 target_free_all_working_areas(target);
5435 e = jim_getopt_wide(goi, &w);
5436 if (e != JIM_OK)
5437 return e;
5438 target->working_area_size = w;
5439 } else {
5440 if (goi->argc != 0)
5441 goto no_params;
5443 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5444 /* loop for more */
5445 break;
5447 case TCFG_WORK_AREA_BACKUP:
5448 if (goi->isconfigure) {
5449 target_free_all_working_areas(target);
5450 e = jim_getopt_wide(goi, &w);
5451 if (e != JIM_OK)
5452 return e;
5453 /* make this exactly 1 or 0 */
5454 target->backup_working_area = (!!w);
5455 } else {
5456 if (goi->argc != 0)
5457 goto no_params;
5459 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5460 /* loop for more */
5461 break;
5464 case TCFG_ENDIAN:
5465 if (goi->isconfigure) {
5466 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5467 if (e != JIM_OK) {
5468 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5469 return e;
5471 target->endianness = n->value;
5472 } else {
5473 if (goi->argc != 0)
5474 goto no_params;
5476 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5477 if (!n->name) {
5478 target->endianness = TARGET_LITTLE_ENDIAN;
5479 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5481 Jim_SetResultString(goi->interp, n->name, -1);
5482 /* loop for more */
5483 break;
5485 case TCFG_COREID:
5486 if (goi->isconfigure) {
5487 e = jim_getopt_wide(goi, &w);
5488 if (e != JIM_OK)
5489 return e;
5490 target->coreid = (int32_t)w;
5491 } else {
5492 if (goi->argc != 0)
5493 goto no_params;
5495 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5496 /* loop for more */
5497 break;
5499 case TCFG_CHAIN_POSITION:
5500 if (goi->isconfigure) {
5501 Jim_Obj *o_t;
5502 struct jtag_tap *tap;
5504 if (target->has_dap) {
5505 Jim_SetResultString(goi->interp,
5506 "target requires -dap parameter instead of -chain-position!", -1);
5507 return JIM_ERR;
5510 target_free_all_working_areas(target);
5511 e = jim_getopt_obj(goi, &o_t);
5512 if (e != JIM_OK)
5513 return e;
5514 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5515 if (!tap)
5516 return JIM_ERR;
5517 target->tap = tap;
5518 target->tap_configured = true;
5519 } else {
5520 if (goi->argc != 0)
5521 goto no_params;
5523 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5524 /* loop for more */
5525 break;
5526 case TCFG_DBGBASE:
5527 if (goi->isconfigure) {
5528 e = jim_getopt_wide(goi, &w);
5529 if (e != JIM_OK)
5530 return e;
5531 target->dbgbase = (uint32_t)w;
5532 target->dbgbase_set = true;
5533 } else {
5534 if (goi->argc != 0)
5535 goto no_params;
5537 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5538 /* loop for more */
5539 break;
5540 case TCFG_RTOS:
5541 /* RTOS */
5543 int result = rtos_create(goi, target);
5544 if (result != JIM_OK)
5545 return result;
5547 /* loop for more */
5548 break;
5550 case TCFG_DEFER_EXAMINE:
5551 /* DEFER_EXAMINE */
5552 target->defer_examine = true;
5553 /* loop for more */
5554 break;
5556 case TCFG_GDB_PORT:
5557 if (goi->isconfigure) {
5558 struct command_context *cmd_ctx = current_command_context(goi->interp);
5559 if (cmd_ctx->mode != COMMAND_CONFIG) {
5560 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5561 return JIM_ERR;
5564 const char *s;
5565 e = jim_getopt_string(goi, &s, NULL);
5566 if (e != JIM_OK)
5567 return e;
5568 free(target->gdb_port_override);
5569 target->gdb_port_override = strdup(s);
5570 } else {
5571 if (goi->argc != 0)
5572 goto no_params;
5574 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5575 /* loop for more */
5576 break;
5578 case TCFG_GDB_MAX_CONNECTIONS:
5579 if (goi->isconfigure) {
5580 struct command_context *cmd_ctx = current_command_context(goi->interp);
5581 if (cmd_ctx->mode != COMMAND_CONFIG) {
5582 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5583 return JIM_ERR;
5586 e = jim_getopt_wide(goi, &w);
5587 if (e != JIM_OK)
5588 return e;
5589 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5590 } else {
5591 if (goi->argc != 0)
5592 goto no_params;
5594 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5595 break;
5597 } /* while (goi->argc) */
5600 /* done - we return */
5601 return JIM_OK;
5604 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5606 struct command *c = jim_to_command(interp);
5607 struct jim_getopt_info goi;
5609 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5610 goi.isconfigure = !strcmp(c->name, "configure");
5611 if (goi.argc < 1) {
5612 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5613 "missing: -option ...");
5614 return JIM_ERR;
5616 struct command_context *cmd_ctx = current_command_context(interp);
5617 assert(cmd_ctx);
5618 struct target *target = get_current_target(cmd_ctx);
5619 return target_configure(&goi, target);
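/*
 * Illustrative 'configure'/'cget' usage dispatched by the wrapper above
 * ($_TARGETNAME is the conventional variable set by target scripts, not
 * something defined in this file):
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -endian
 */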
5622 static int jim_target_mem2array(Jim_Interp *interp,
5623 int argc, Jim_Obj *const *argv)
5625 struct command_context *cmd_ctx = current_command_context(interp);
5626 assert(cmd_ctx);
5627 struct target *target = get_current_target(cmd_ctx);
5628 return target_mem2array(interp, target, argc - 1, argv + 1);
5631 static int jim_target_array2mem(Jim_Interp *interp,
5632 int argc, Jim_Obj *const *argv)
5634 struct command_context *cmd_ctx = current_command_context(interp);
5635 assert(cmd_ctx);
5636 struct target *target = get_current_target(cmd_ctx);
5637 return target_array2mem(interp, target, argc - 1, argv + 1);
5640 COMMAND_HANDLER(handle_target_examine)
5642 bool allow_defer = false;
5644 if (CMD_ARGC > 1)
5645 return ERROR_COMMAND_SYNTAX_ERROR;
5647 if (CMD_ARGC == 1) {
5648 if (strcmp(CMD_ARGV[0], "allow-defer"))
5649 return ERROR_COMMAND_ARGUMENT_INVALID;
5650 allow_defer = true;
5653 struct target *target = get_current_target(CMD_CTX);
5654 if (!target->tap->enabled) {
5655 command_print(CMD, "[TAP is disabled]");
5656 return ERROR_FAIL;
5659 if (allow_defer && target->defer_examine) {
5660 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5661 LOG_INFO("Use arp_examine command to examine it manually!");
5662 return ERROR_OK;
5665 int retval = target->type->examine(target);
5666 if (retval != ERROR_OK) {
5667 target_reset_examined(target);
5668 return retval;
5671 target_set_examined(target);
5673 return ERROR_OK;
5676 COMMAND_HANDLER(handle_target_was_examined)
5678 if (CMD_ARGC != 0)
5679 return ERROR_COMMAND_SYNTAX_ERROR;
5681 struct target *target = get_current_target(CMD_CTX);
5683 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5685 return ERROR_OK;
5688 COMMAND_HANDLER(handle_target_examine_deferred)
5690 if (CMD_ARGC != 0)
5691 return ERROR_COMMAND_SYNTAX_ERROR;
5693 struct target *target = get_current_target(CMD_CTX);
5695 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5697 return ERROR_OK;
5700 COMMAND_HANDLER(handle_target_halt_gdb)
5702 if (CMD_ARGC != 0)
5703 return ERROR_COMMAND_SYNTAX_ERROR;
5705 struct target *target = get_current_target(CMD_CTX);
5707 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5710 COMMAND_HANDLER(handle_target_poll)
5712 if (CMD_ARGC != 0)
5713 return ERROR_COMMAND_SYNTAX_ERROR;
5715 struct target *target = get_current_target(CMD_CTX);
5716 if (!target->tap->enabled) {
5717 command_print(CMD, "[TAP is disabled]");
5718 return ERROR_FAIL;
5721 if (!(target_was_examined(target)))
5722 return ERROR_TARGET_NOT_EXAMINED;
5724 return target->type->poll(target);
5727 COMMAND_HANDLER(handle_target_reset)
5729 if (CMD_ARGC != 2)
5730 return ERROR_COMMAND_SYNTAX_ERROR;
5732 const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
5733 if (!n->name) {
5734 nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
5735 return ERROR_COMMAND_ARGUMENT_INVALID;
5738 /* the halt or not param */
5739 int a;
5740 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);
5742 struct target *target = get_current_target(CMD_CTX);
5743 if (!target->tap->enabled) {
5744 command_print(CMD, "[TAP is disabled]");
5745 return ERROR_FAIL;
5748 if (!target->type->assert_reset || !target->type->deassert_reset) {
5749 command_print(CMD, "No target-specific reset for %s", target_name(target));
5750 return ERROR_FAIL;
5753 if (target->defer_examine)
5754 target_reset_examined(target);
5756 /* determine if we should halt or not. */
5757 target->reset_halt = (a != 0);
5758 /* When this happens - all workareas are invalid. */
5759 target_free_all_working_areas_restore(target, 0);
5761 /* do the assert */
5762 if (n->value == NVP_ASSERT)
5763 return target->type->assert_reset(target);
5764 return target->type->deassert_reset(target);
5767 COMMAND_HANDLER(handle_target_halt)
5769 if (CMD_ARGC != 0)
5770 return ERROR_COMMAND_SYNTAX_ERROR;
5772 struct target *target = get_current_target(CMD_CTX);
5773 if (!target->tap->enabled) {
5774 command_print(CMD, "[TAP is disabled]");
5775 return ERROR_FAIL;
5778 return target->type->halt(target);
5781 COMMAND_HANDLER(handle_target_wait_state)
5783 if (CMD_ARGC != 2)
5784 return ERROR_COMMAND_SYNTAX_ERROR;
5786 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5787 if (!n->name) {
5788 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5789 return ERROR_COMMAND_ARGUMENT_INVALID;
5792 unsigned int a;
5793 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5795 struct target *target = get_current_target(CMD_CTX);
5796 if (!target->tap->enabled) {
5797 command_print(CMD, "[TAP is disabled]");
5798 return ERROR_FAIL;
5801 int retval = target_wait_state(target, n->value, a);
5802 if (retval != ERROR_OK) {
5803 command_print(CMD,
5804 "target: %s wait %s fails (%d) %s",
5805 target_name(target), n->name,
5806 retval, target_strerror_safe(retval));
5807 return retval;
5809 return ERROR_OK;
5811 /* List, for humans, the events defined for this target.
5812  * Scripts/programs should use 'name cget -event NAME' instead.
5814 COMMAND_HANDLER(handle_target_event_list)
5816 struct target *target = get_current_target(CMD_CTX);
5817 struct target_event_action *teap = target->event_action;
5819 command_print(CMD, "Event actions for target %s\n",
5820 target_name(target));
5821 command_print(CMD, "%-25s | Body", "Event");
5822 command_print(CMD, "------------------------- | "
5823 "----------------------------------------");
5824 while (teap) {
5825 command_print(CMD, "%-25s | %s",
5826 target_event_name(teap->event),
5827 Jim_GetString(teap->body, NULL));
5828 teap = teap->next;
5830 command_print(CMD, "***END***");
5831 return ERROR_OK;
5834 COMMAND_HANDLER(handle_target_current_state)
5836 if (CMD_ARGC != 0)
5837 return ERROR_COMMAND_SYNTAX_ERROR;
5839 struct target *target = get_current_target(CMD_CTX);
5841 command_print(CMD, "%s", target_state_name(target));
5843 return ERROR_OK;
5846 COMMAND_HANDLER(handle_target_debug_reason)
5848 if (CMD_ARGC != 0)
5849 return ERROR_COMMAND_SYNTAX_ERROR;
5851 struct target *target = get_current_target(CMD_CTX);
5854 const char *debug_reason = nvp_value2name(nvp_target_debug_reason,
5855 target->debug_reason)->name;
5857 if (!debug_reason) {
5858 command_print(CMD, "bug: invalid debug reason (%d)",
5859 target->debug_reason);
5860 return ERROR_FAIL;
5863 command_print(CMD, "%s", debug_reason);
5865 return ERROR_OK;
5868 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5870 struct jim_getopt_info goi;
5871 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5872 if (goi.argc != 1) {
5873 const char *cmd_name = Jim_GetString(argv[0], NULL);
5874 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5875 return JIM_ERR;
5877 struct jim_nvp *n;
5878 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5879 if (e != JIM_OK) {
5880 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5881 return e;
5883 struct command_context *cmd_ctx = current_command_context(interp);
5884 assert(cmd_ctx);
5885 struct target *target = get_current_target(cmd_ctx);
5886 target_handle_event(target, n->value);
5887 return JIM_OK;
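/*
 * Illustrative: `$_TARGETNAME invoke-event reset-init` runs the body that was
 * installed with `configure -event reset-init`, if any; the event name must
 * be one of nvp_target_event.
 */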
5890 static const struct command_registration target_instance_command_handlers[] = {
5892 .name = "configure",
5893 .mode = COMMAND_ANY,
5894 .jim_handler = jim_target_configure,
5895 .help = "configure a new target for use",
5896 .usage = "[target_attribute ...]",
5899 .name = "cget",
5900 .mode = COMMAND_ANY,
5901 .jim_handler = jim_target_configure,
5902 .help = "returns the specified target attribute",
5903 .usage = "target_attribute",
5906 .name = "mwd",
5907 .handler = handle_mw_command,
5908 .mode = COMMAND_EXEC,
5909 .help = "Write 64-bit word(s) to target memory",
5910 .usage = "address data [count]",
5913 .name = "mww",
5914 .handler = handle_mw_command,
5915 .mode = COMMAND_EXEC,
5916 .help = "Write 32-bit word(s) to target memory",
5917 .usage = "address data [count]",
5920 .name = "mwh",
5921 .handler = handle_mw_command,
5922 .mode = COMMAND_EXEC,
5923 .help = "Write 16-bit half-word(s) to target memory",
5924 .usage = "address data [count]",
5927 .name = "mwb",
5928 .handler = handle_mw_command,
5929 .mode = COMMAND_EXEC,
5930 .help = "Write byte(s) to target memory",
5931 .usage = "address data [count]",
5934 .name = "mdd",
5935 .handler = handle_md_command,
5936 .mode = COMMAND_EXEC,
5937 .help = "Display target memory as 64-bit words",
5938 .usage = "address [count]",
5941 .name = "mdw",
5942 .handler = handle_md_command,
5943 .mode = COMMAND_EXEC,
5944 .help = "Display target memory as 32-bit words",
5945 .usage = "address [count]",
5948 .name = "mdh",
5949 .handler = handle_md_command,
5950 .mode = COMMAND_EXEC,
5951 .help = "Display target memory as 16-bit half-words",
5952 .usage = "address [count]",
5955 .name = "mdb",
5956 .handler = handle_md_command,
5957 .mode = COMMAND_EXEC,
5958 .help = "Display target memory as 8-bit bytes",
5959 .usage = "address [count]",
5962 .name = "array2mem",
5963 .mode = COMMAND_EXEC,
5964 .jim_handler = jim_target_array2mem,
5965 .help = "Writes Tcl array of 8/16/32 bit numbers "
5966 "to target memory",
5967 .usage = "arrayname bitwidth address count",
5970 .name = "mem2array",
5971 .mode = COMMAND_EXEC,
5972 .jim_handler = jim_target_mem2array,
5973 .help = "Loads Tcl array of 8/16/32 bit numbers "
5974 "from target memory",
5975 .usage = "arrayname bitwidth address count",
5978 .name = "get_reg",
5979 .mode = COMMAND_EXEC,
5980 .jim_handler = target_jim_get_reg,
5981 .help = "Get register values from the target",
5982 .usage = "list",
5985 .name = "set_reg",
5986 .mode = COMMAND_EXEC,
5987 .jim_handler = target_jim_set_reg,
5988 .help = "Set target register values",
5989 .usage = "dict",
5992 .name = "read_memory",
5993 .mode = COMMAND_EXEC,
5994 .handler = handle_target_read_memory,
5995 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
5996 .usage = "address width count ['phys']",
5999 .name = "write_memory",
6000 .mode = COMMAND_EXEC,
6001 .jim_handler = target_jim_write_memory,
6002 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6003 .usage = "address width data ['phys']",
6006 .name = "eventlist",
6007 .handler = handle_target_event_list,
6008 .mode = COMMAND_EXEC,
6009 .help = "displays a table of events defined for this target",
6010 .usage = "",
6013 .name = "curstate",
6014 .mode = COMMAND_EXEC,
6015 .handler = handle_target_current_state,
6016 .help = "displays the current state of this target",
6017 .usage = "",
6020 .name = "debug_reason",
6021 .mode = COMMAND_EXEC,
6022 .handler = handle_target_debug_reason,
6023 .help = "displays the debug reason of this target",
6024 .usage = "",
6027 .name = "arp_examine",
6028 .mode = COMMAND_EXEC,
6029 .handler = handle_target_examine,
6030 .help = "used internally for reset processing",
6031 .usage = "['allow-defer']",
6034 .name = "was_examined",
6035 .mode = COMMAND_EXEC,
6036 .handler = handle_target_was_examined,
6037 .help = "used internally for reset processing",
6038 .usage = "",
6041 .name = "examine_deferred",
6042 .mode = COMMAND_EXEC,
6043 .handler = handle_target_examine_deferred,
6044 .help = "used internally for reset processing",
6045 .usage = "",
6048 .name = "arp_halt_gdb",
6049 .mode = COMMAND_EXEC,
6050 .handler = handle_target_halt_gdb,
6051 .help = "used internally for reset processing to halt GDB",
6052 .usage = "",
6055 .name = "arp_poll",
6056 .mode = COMMAND_EXEC,
6057 .handler = handle_target_poll,
6058 .help = "used internally for reset processing",
6059 .usage = "",
6062 .name = "arp_reset",
6063 .mode = COMMAND_EXEC,
6064 .handler = handle_target_reset,
6065 .help = "used internally for reset processing",
6066 .usage = "'assert'|'deassert' halt",
6069 .name = "arp_halt",
6070 .mode = COMMAND_EXEC,
6071 .handler = handle_target_halt,
6072 .help = "used internally for reset processing",
6073 .usage = "",
6076 .name = "arp_waitstate",
6077 .mode = COMMAND_EXEC,
6078 .handler = handle_target_wait_state,
6079 .help = "used internally for reset processing",
6080 .usage = "statename timeoutmsecs",
6083 .name = "invoke-event",
6084 .mode = COMMAND_EXEC,
6085 .jim_handler = jim_target_invoke_event,
6086 .help = "invoke handler for specified event",
6087 .usage = "event_name",
6089 COMMAND_REGISTRATION_DONE
6092 static int target_create(struct jim_getopt_info *goi)
6094 Jim_Obj *new_cmd;
6095 Jim_Cmd *cmd;
6096 const char *cp;
6097 int e;
6098 int x;
6099 struct target *target;
6100 struct command_context *cmd_ctx;
6102 cmd_ctx = current_command_context(goi->interp);
6103 assert(cmd_ctx);
6105 if (goi->argc < 3) {
6106 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6107 return JIM_ERR;
6110 /* COMMAND */
6111 jim_getopt_obj(goi, &new_cmd);
6112 /* does this command exist? */
6113 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6114 if (cmd) {
6115 cp = Jim_GetString(new_cmd, NULL);
6116 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6117 return JIM_ERR;
6120 /* TYPE */
6121 e = jim_getopt_string(goi, &cp, NULL);
6122 if (e != JIM_OK)
6123 return e;
6124 struct transport *tr = get_current_transport();
6125 if (tr && tr->override_target) {
6126 e = tr->override_target(&cp);
6127 if (e != ERROR_OK) {
6128 LOG_ERROR("The selected transport doesn't support this target");
6129 return JIM_ERR;
6131 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6133 /* now check whether the requested target type exists */
6134 for (x = 0 ; target_types[x] ; x++) {
6135 if (strcmp(cp, target_types[x]->name) == 0) {
6136 /* found */
6137 break;
6140 if (!target_types[x]) {
6141 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6142 for (x = 0 ; target_types[x] ; x++) {
6143 if (target_types[x + 1]) {
6144 Jim_AppendStrings(goi->interp,
6145 Jim_GetResult(goi->interp),
6146 target_types[x]->name,
6147 ", ", NULL);
6148 } else {
6149 Jim_AppendStrings(goi->interp,
6150 Jim_GetResult(goi->interp),
6151 " or ",
6152 target_types[x]->name, NULL);
6155 return JIM_ERR;
6158 /* Create it */
6159 target = calloc(1, sizeof(struct target));
6160 if (!target) {
6161 LOG_ERROR("Out of memory");
6162 return JIM_ERR;
6165 /* set empty smp cluster */
6166 target->smp_targets = &empty_smp_targets;
6168 /* each target instance gets a private, writable copy of its target_type */
6169 target->type = malloc(sizeof(struct target_type));
6170 if (!target->type) {
6171 LOG_ERROR("Out of memory");
6172 free(target);
6173 return JIM_ERR;
6176 memcpy(target->type, target_types[x], sizeof(struct target_type));
6178 /* default to first core, override with -coreid */
6179 target->coreid = 0;
6181 target->working_area = 0x0;
6182 target->working_area_size = 0x0;
6183 target->working_areas = NULL;
6184 target->backup_working_area = 0;
6186 target->state = TARGET_UNKNOWN;
6187 target->debug_reason = DBG_REASON_UNDEFINED;
6188 target->reg_cache = NULL;
6189 target->breakpoints = NULL;
6190 target->watchpoints = NULL;
6191 target->next = NULL;
6192 target->arch_info = NULL;
6194 target->verbose_halt_msg = true;
6196 target->halt_issued = false;
6198 /* initialize trace information */
6199 target->trace_info = calloc(1, sizeof(struct trace));
6200 if (!target->trace_info) {
6201 LOG_ERROR("Out of memory");
6202 free(target->type);
6203 free(target);
6204 return JIM_ERR;
6207 target->dbgmsg = NULL;
6208 target->dbg_msg_enabled = 0;
6210 target->endianness = TARGET_ENDIAN_UNKNOWN;
6212 target->rtos = NULL;
6213 target->rtos_auto_detect = false;
6215 target->gdb_port_override = NULL;
6216 target->gdb_max_connections = 1;
6218 /* Do the rest as "configure" options */
6219 goi->isconfigure = 1;
6220 e = target_configure(goi, target);
6222 if (e == JIM_OK) {
6223 if (target->has_dap) {
6224 if (!target->dap_configured) {
6225 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6226 e = JIM_ERR;
6228 } else {
6229 if (!target->tap_configured) {
6230 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6231 e = JIM_ERR;
6234 /* tap must be set after target was configured */
6235 if (!target->tap)
6236 e = JIM_ERR;
6239 if (e != JIM_OK) {
6240 rtos_destroy(target);
6241 free(target->gdb_port_override);
6242 free(target->trace_info);
6243 free(target->type);
6244 free(target);
6245 return e;
6248 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6249 /* default endian to little if not specified */
6250 target->endianness = TARGET_LITTLE_ENDIAN;
6253 cp = Jim_GetString(new_cmd, NULL);
6254 target->cmd_name = strdup(cp);
6255 if (!target->cmd_name) {
6256 LOG_ERROR("Out of memory");
6257 rtos_destroy(target);
6258 free(target->gdb_port_override);
6259 free(target->trace_info);
6260 free(target->type);
6261 free(target);
6262 return JIM_ERR;
6265 if (target->type->target_create) {
6266 e = (*(target->type->target_create))(target, goi->interp);
6267 if (e != ERROR_OK) {
6268 LOG_DEBUG("target_create failed");
6269 free(target->cmd_name);
6270 rtos_destroy(target);
6271 free(target->gdb_port_override);
6272 free(target->trace_info);
6273 free(target->type);
6274 free(target);
6275 return JIM_ERR;
6279 /* create the target specific commands */
6280 if (target->type->commands) {
6281 e = register_commands(cmd_ctx, NULL, target->type->commands);
6282 if (e != ERROR_OK)
6283 LOG_ERROR("unable to register '%s' commands", cp);
6286 /* now - create the new target name command */
6287 const struct command_registration target_subcommands[] = {
6289 .chain = target_instance_command_handlers,
6292 .chain = target->type->commands,
6294 COMMAND_REGISTRATION_DONE
6296 const struct command_registration target_commands[] = {
6298 .name = cp,
6299 .mode = COMMAND_ANY,
6300 .help = "target command group",
6301 .usage = "",
6302 .chain = target_subcommands,
6304 COMMAND_REGISTRATION_DONE
6306 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6307 if (e != ERROR_OK) {
6308 if (target->type->deinit_target)
6309 target->type->deinit_target(target);
6310 free(target->cmd_name);
6311 rtos_destroy(target);
6312 free(target->gdb_port_override);
6313 free(target->trace_info);
6314 free(target->type);
6315 free(target);
6316 return JIM_ERR;
6319 /* append to end of list */
6320 append_to_list_all_targets(target);
6322 cmd_ctx->current_target = target;
6323 return JIM_OK;
6326 COMMAND_HANDLER(handle_target_current)
6328 if (CMD_ARGC != 0)
6329 return ERROR_COMMAND_SYNTAX_ERROR;
6331 struct target *target = get_current_target_or_null(CMD_CTX);
6332 if (target)
6333 command_print(CMD, "%s", target_name(target));
6335 return ERROR_OK;
6338 COMMAND_HANDLER(handle_target_types)
6340 if (CMD_ARGC != 0)
6341 return ERROR_COMMAND_SYNTAX_ERROR;
6343 for (unsigned int x = 0; target_types[x]; x++)
6344 command_print(CMD, "%s", target_types[x]->name);
6346 return ERROR_OK;
6349 COMMAND_HANDLER(handle_target_names)
6351 if (CMD_ARGC != 0)
6352 return ERROR_COMMAND_SYNTAX_ERROR;
6354 struct target *target = all_targets;
6355 while (target) {
6356 command_print(CMD, "%s", target_name(target));
6357 target = target->next;
6360 return ERROR_OK;
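/*
 * Look up a target by name and wrap it in a freshly allocated
 * target_list node; returns NULL if the target is unknown or
 * allocation fails.
 */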
6363 static struct target_list *
6364 __attribute__((warn_unused_result))
6365 create_target_list_node(const char *targetname)
6367 struct target *target = get_target(targetname);
6368 LOG_DEBUG("%s ", targetname);
6369 if (!target)
6370 return NULL;
6372 struct target_list *new = malloc(sizeof(struct target_list));
6373 if (!new) {
6374 LOG_ERROR("Out of memory");
6375 return new;
6378 new->target = target;
6379 return new;
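/*
 * Walk an SMP list and check that every member with an RTOS configured
 * uses the same RTOS type. On success *result points to one such
 * RTOS-enabled target, or NULL if no member has an RTOS.
 */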
6382 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6383 struct list_head *lh, struct target **result)
6385 struct target *target = NULL;
6386 struct target_list *curr;
6387 foreach_smp_target(curr, lh) {
6388 struct rtos *curr_rtos = curr->target->rtos;
6389 if (curr_rtos) {
6390 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6391 command_print(cmd, "Different rtos types in members of one smp target!");
6392 return ERROR_FAIL;
6394 target = curr->target;
6397 *result = target;
6398 return ERROR_OK;
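/*
 * 'target smp' handler: links the named targets into one shared SMP
 * list, tags them with a common group number and, if they share an
 * RTOS type, initializes RTOS SMP support.
 * Illustrative usage (target names are examples):
 *   target smp chip.cpu0 chip.cpu1
 */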
6401 COMMAND_HANDLER(handle_target_smp)
6403 static int smp_group = 1;
6405 if (CMD_ARGC == 0) {
6406 LOG_DEBUG("Empty SMP target");
6407 return ERROR_OK;
6409 LOG_DEBUG("%d", CMD_ARGC);
6410 /* CMD_ARGV[0] = first target to associate in SMP
6411 * CMD_ARGV[1] = second target to associate in SMP
6412 * CMD_ARGV[2] ...
6415 struct list_head *lh = malloc(sizeof(*lh));
6416 if (!lh) {
6417 LOG_ERROR("Out of memory");
6418 return ERROR_FAIL;
6420 INIT_LIST_HEAD(lh);
6422 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6423 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6424 if (new)
6425 list_add_tail(&new->lh, lh);
6427 /* now walk the list of CPUs and put each target into SMP mode */
6428 struct target_list *curr;
6429 foreach_smp_target(curr, lh) {
6430 struct target *target = curr->target;
6431 target->smp = smp_group;
6432 target->smp_targets = lh;
6434 smp_group++;
6436 struct target *rtos_target;
6437 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6438 if (retval == ERROR_OK && rtos_target)
6439 retval = rtos_smp_init(rtos_target);
6441 return retval;
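/*
 * Thin Jim wrapper for 'target create': checks the minimum argument
 * count, then delegates the real work to target_create().
 */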
6444 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6446 struct jim_getopt_info goi;
6447 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6448 if (goi.argc < 3) {
6449 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6450 "<name> <target_type> [<target_options> ...]");
6451 return JIM_ERR;
6453 return target_create(&goi);
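/*
 * Subcommands of the top-level 'target' command group.
 * Illustrative configuration (names are examples):
 *   target create chip.cpu cortex_m -dap chip.dap
 */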
6456 static const struct command_registration target_subcommand_handlers[] = {
6458 .name = "init",
6459 .mode = COMMAND_CONFIG,
6460 .handler = handle_target_init_command,
6461 .help = "initialize targets",
6462 .usage = "",
6465 .name = "create",
6466 .mode = COMMAND_CONFIG,
6467 .jim_handler = jim_target_create,
6468 .usage = "name type '-chain-position' name [options ...]",
6469 .help = "Creates and selects a new target",
6472 .name = "current",
6473 .mode = COMMAND_ANY,
6474 .handler = handle_target_current,
6475 .help = "Returns the currently selected target",
6476 .usage = "",
6479 .name = "types",
6480 .mode = COMMAND_ANY,
6481 .handler = handle_target_types,
6482 .help = "Returns the available target types as "
6483 "a list of strings",
6484 .usage = "",
6487 .name = "names",
6488 .mode = COMMAND_ANY,
6489 .handler = handle_target_names,
6490 .help = "Returns the names of all targets as a list of strings",
6491 .usage = "",
6494 .name = "smp",
6495 .mode = COMMAND_ANY,
6496 .handler = handle_target_smp,
6497 .usage = "targetname1 targetname2 ...",
6498 .help = "gather several target in a smp list"
6501 COMMAND_REGISTRATION_DONE
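/*
 * Host-side image cache used by 'fast_load_image'/'fast_load':
 * each cached section records its target address, data and length.
 */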
6504 struct fast_load {
6505 target_addr_t address;
6506 uint8_t *data;
6507 int length;
6511 static int fastload_num;
6512 static struct fast_load *fastload;
6514 static void free_fastload(void)
6516 if (fastload) {
6517 for (int i = 0; i < fastload_num; i++)
6518 free(fastload[i].data);
6519 free(fastload);
6520 fastload = NULL;
6524 COMMAND_HANDLER(handle_fast_load_image_command)
6526 uint8_t *buffer;
6527 size_t buf_cnt;
6528 uint32_t image_size;
6529 target_addr_t min_address = 0;
6530 target_addr_t max_address = -1;
6532 struct image image;
6534 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6535 &image, &min_address, &max_address);
6536 if (retval != ERROR_OK)
6537 return retval;
6539 struct duration bench;
6540 duration_start(&bench);
6542 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6543 if (retval != ERROR_OK)
6544 return retval;
6546 image_size = 0x0;
6547 retval = ERROR_OK;
6548 fastload_num = image.num_sections;
6549 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6550 if (!fastload) {
6551 command_print(CMD, "out of memory");
6552 image_close(&image);
6553 return ERROR_FAIL;
6555 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6556 for (unsigned int i = 0; i < image.num_sections; i++) {
6557 buffer = malloc(image.sections[i].size);
6558 if (!buffer) {
6559 command_print(CMD, "error allocating buffer for section (%d bytes)",
6560 (int)(image.sections[i].size));
6561 retval = ERROR_FAIL;
6562 break;
6565 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6566 if (retval != ERROR_OK) {
6567 free(buffer);
6568 break;
6571 uint32_t offset = 0;
6572 uint32_t length = buf_cnt;
6574 /* DANGER!!! beware of unsigned comparison here!!! */
6576 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6577 (image.sections[i].base_address < max_address)) {
6578 if (image.sections[i].base_address < min_address) {
6579 /* clip addresses below */
6580 offset += min_address-image.sections[i].base_address;
6581 length -= offset;
6584 if (image.sections[i].base_address + buf_cnt > max_address)
6585 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6587 fastload[i].address = image.sections[i].base_address + offset;
6588 fastload[i].data = malloc(length);
6589 if (!fastload[i].data) {
6590 free(buffer);
6591 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6592 length);
6593 retval = ERROR_FAIL;
6594 break;
6596 memcpy(fastload[i].data, buffer + offset, length);
6597 fastload[i].length = length;
6599 image_size += length;
6600 command_print(CMD, "%u bytes written at address 0x%8.8x",
6601 (unsigned int)length,
6602 ((unsigned int)(image.sections[i].base_address + offset)));
6605 free(buffer);
6608 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6609 command_print(CMD, "Loaded %" PRIu32 " bytes "
6610 "in %fs (%0.3f KiB/s)", image_size,
6611 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6613 command_print(CMD,
6614 "WARNING: image has not been loaded to target!"
6615 "You can issue a 'fast_load' to finish loading.");
6618 image_close(&image);
6620 if (retval != ERROR_OK)
6621 free_fastload();
6623 return retval;
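/*
 * 'fast_load' handler: replays the sections cached by a previous
 * 'fast_load_image' to the current target and reports the throughput.
 * Typical sequence (file name and address are examples):
 *   fast_load_image firmware.bin 0x08000000 bin
 *   fast_load
 */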
6626 COMMAND_HANDLER(handle_fast_load_command)
6628 if (CMD_ARGC > 0)
6629 return ERROR_COMMAND_SYNTAX_ERROR;
6630 if (!fastload) {
6631 LOG_ERROR("No image in memory");
6632 return ERROR_FAIL;
6634 int i;
6635 int64_t ms = timeval_ms();
6636 int size = 0;
6637 int retval = ERROR_OK;
6638 for (i = 0; i < fastload_num; i++) {
6639 struct target *target = get_current_target(CMD_CTX);
6640 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6641 (unsigned int)(fastload[i].address),
6642 (unsigned int)(fastload[i].length));
6643 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6644 if (retval != ERROR_OK)
6645 break;
6646 size += fastload[i].length;
6648 if (retval == ERROR_OK) {
6649 int64_t after = timeval_ms();
6650 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6652 return retval;
6655 static const struct command_registration target_command_handlers[] = {
6657 .name = "targets",
6658 .handler = handle_targets_command,
6659 .mode = COMMAND_ANY,
6660 .help = "change current default target (one parameter) "
6661 "or prints table of all targets (no parameters)",
6662 .usage = "[target]",
6665 .name = "target",
6666 .mode = COMMAND_CONFIG,
6667 .help = "configure target",
6668 .chain = target_subcommand_handlers,
6669 .usage = "",
6671 COMMAND_REGISTRATION_DONE
6674 int target_register_commands(struct command_context *cmd_ctx)
6676 return register_commands(cmd_ctx, NULL, target_command_handlers);
6679 static bool target_reset_nag = true;
6681 bool get_target_reset_nag(void)
6683 return target_reset_nag;
6686 COMMAND_HANDLER(handle_target_reset_nag)
6688 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6689 &target_reset_nag, "Nag after each reset about options to improve "
6690 "performance");
6693 COMMAND_HANDLER(handle_ps_command)
6695 struct target *target = get_current_target(CMD_CTX);
6696 char *display;
6697 if (target->state != TARGET_HALTED) {
6698 command_print(CMD, "Error: [%s] not halted", target_name(target));
6699 return ERROR_TARGET_NOT_HALTED;
6702 if ((target->rtos) && (target->rtos->type)
6703 && (target->rtos->type->ps_command)) {
6704 display = target->rtos->type->ps_command(target);
6705 command_print(CMD, "%s", display);
6706 free(display);
6707 return ERROR_OK;
6708 } else {
6709 LOG_INFO("Failed: target has no RTOS configured or its RTOS does not support the ps command");
6710 return ERROR_TARGET_FAILURE;
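/* Print an optional label followed by a one-line hex dump of 'size' bytes. */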
6714 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6716 if (text)
6717 command_print_sameline(cmd, "%s", text);
6718 for (int i = 0; i < size; i++)
6719 command_print_sameline(cmd, " %02x", buf[i]);
6720 command_print(cmd, " ");
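/*
 * 'test_mem_access' handler: self-test of the target memory accessors.
 * A random pattern is placed in a working area, then reads and writes
 * are exercised for each access size (1/2/4 bytes), target address
 * offset (0..3) and host buffer alignment, and the result is compared
 * against a host-side replay of the expected outcome.
 * Illustrative usage (the size is an example): test_mem_access 0x400
 */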
6723 COMMAND_HANDLER(handle_test_mem_access_command)
6725 struct target *target = get_current_target(CMD_CTX);
6726 uint32_t test_size;
6727 int retval = ERROR_OK;
6729 if (target->state != TARGET_HALTED) {
6730 command_print(CMD, "Error: [%s] not halted", target_name(target));
6731 return ERROR_TARGET_NOT_HALTED;
6734 if (CMD_ARGC != 1)
6735 return ERROR_COMMAND_SYNTAX_ERROR;
6737 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6739 /* Test reads */
6740 size_t num_bytes = test_size + 4;
6742 struct working_area *wa = NULL;
6743 retval = target_alloc_working_area(target, num_bytes, &wa);
6744 if (retval != ERROR_OK) {
6745 LOG_ERROR("Not enough working area");
6746 return ERROR_FAIL;
6749 uint8_t *test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6751 for (size_t i = 0; i < num_bytes; i++)
6752 test_pattern[i] = rand();
6754 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6755 if (retval != ERROR_OK) {
6756 LOG_ERROR("Test pattern write failed");
6757 goto out;
6760 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6761 for (int size = 1; size <= 4; size *= 2) {
6762 for (int offset = 0; offset < 4; offset++) {
6763 uint32_t count = test_size / size;
6764 size_t host_bufsiz = (count + 2) * size + host_offset;
6765 uint8_t *read_ref = malloc(host_bufsiz);
6766 uint8_t *read_buf = malloc(host_bufsiz);
6768 for (size_t i = 0; i < host_bufsiz; i++) {
6769 read_ref[i] = rand();
6770 read_buf[i] = read_ref[i];
6772 command_print_sameline(CMD,
6773 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6774 size, offset, host_offset ? "un" : "");
6776 struct duration bench;
6777 duration_start(&bench);
6779 retval = target_read_memory(target, wa->address + offset, size, count,
6780 read_buf + size + host_offset);
6782 duration_measure(&bench);
6784 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6785 command_print(CMD, "Unsupported alignment");
6786 goto next;
6787 } else if (retval != ERROR_OK) {
6788 command_print(CMD, "Memory read failed");
6789 goto next;
6792 /* replay on host */
6793 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6795 /* check result */
6796 int result = memcmp(read_ref, read_buf, host_bufsiz);
6797 if (result == 0) {
6798 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6799 duration_elapsed(&bench),
6800 duration_kbps(&bench, count * size));
6801 } else {
6802 command_print(CMD, "Compare failed");
6803 binprint(CMD, "ref:", read_ref, host_bufsiz);
6804 binprint(CMD, "buf:", read_buf, host_bufsiz);
6806 next:
6807 free(read_ref);
6808 free(read_buf);
6813 out:
6814 free(test_pattern);
6816 target_free_working_area(target, wa);
6818 /* Test writes */
6819 num_bytes = test_size + 4 + 4 + 4;
6821 retval = target_alloc_working_area(target, num_bytes, &wa);
6822 if (retval != ERROR_OK) {
6823 LOG_ERROR("Not enough working area");
6824 return ERROR_FAIL;
6827 test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6829 for (size_t i = 0; i < num_bytes; i++)
6830 test_pattern[i] = rand();
6832 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6833 for (int size = 1; size <= 4; size *= 2) {
6834 for (int offset = 0; offset < 4; offset++) {
6835 uint32_t count = test_size / size;
6836 size_t host_bufsiz = count * size + host_offset;
6837 uint8_t *read_ref = malloc(num_bytes);
6838 uint8_t *read_buf = malloc(num_bytes);
6839 uint8_t *write_buf = malloc(host_bufsiz);
6841 for (size_t i = 0; i < host_bufsiz; i++)
6842 write_buf[i] = rand();
6843 command_print_sameline(CMD,
6844 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6845 size, offset, host_offset ? "un" : "");
6847 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6848 if (retval != ERROR_OK) {
6849 command_print(CMD, "Test pattern write failed");
6850 goto nextw;
6853 /* replay on host */
6854 memcpy(read_ref, test_pattern, num_bytes);
6855 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6857 struct duration bench;
6858 duration_start(&bench);
6860 retval = target_write_memory(target, wa->address + size + offset, size, count,
6861 write_buf + host_offset);
6863 duration_measure(&bench);
6865 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6866 command_print(CMD, "Unsupported alignment");
6867 goto nextw;
6868 } else if (retval != ERROR_OK) {
6869 command_print(CMD, "Memory write failed");
6870 goto nextw;
6873 /* read back */
6874 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6875 if (retval != ERROR_OK) {
6876 command_print(CMD, "Test pattern read back failed");
6877 goto nextw;
6880 /* check result */
6881 int result = memcmp(read_ref, read_buf, num_bytes);
6882 if (result == 0) {
6883 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6884 duration_elapsed(&bench),
6885 duration_kbps(&bench, count * size));
6886 } else {
6887 command_print(CMD, "Compare failed");
6888 binprint(CMD, "ref:", read_ref, num_bytes);
6889 binprint(CMD, "buf:", read_buf, num_bytes);
6891 nextw:
6892 free(read_ref);
6893 free(read_buf);
6898 free(test_pattern);
6900 target_free_working_area(target, wa);
6901 return retval;
6904 static const struct command_registration target_exec_command_handlers[] = {
6906 .name = "fast_load_image",
6907 .handler = handle_fast_load_image_command,
6908 .mode = COMMAND_ANY,
6909 .help = "Load image into server memory for later use by "
6910 "fast_load; primarily for profiling",
6911 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6912 "[min_address [max_length]]",
6915 .name = "fast_load",
6916 .handler = handle_fast_load_command,
6917 .mode = COMMAND_EXEC,
6918 .help = "loads active fast load image to current target "
6919 "- mainly for profiling purposes",
6920 .usage = "",
6923 .name = "profile",
6924 .handler = handle_profile_command,
6925 .mode = COMMAND_EXEC,
6926 .usage = "seconds filename [start end]",
6927 .help = "profiling samples the CPU PC",
6929 /** @todo don't register virt2phys() unless target supports it */
6931 .name = "virt2phys",
6932 .handler = handle_virt2phys_command,
6933 .mode = COMMAND_ANY,
6934 .help = "translate a virtual address into a physical address",
6935 .usage = "virtual_address",
6938 .name = "reg",
6939 .handler = handle_reg_command,
6940 .mode = COMMAND_EXEC,
6941 .help = "display (reread from target with \"force\") or set a register; "
6942 "with no arguments, displays all registers and their values",
6943 .usage = "[(register_number|register_name) [(value|'force')]]",
6946 .name = "poll",
6947 .handler = handle_poll_command,
6948 .mode = COMMAND_EXEC,
6949 .help = "poll target state; or reconfigure background polling",
6950 .usage = "['on'|'off']",
6953 .name = "wait_halt",
6954 .handler = handle_wait_halt_command,
6955 .mode = COMMAND_EXEC,
6956 .help = "wait up to the specified number of milliseconds "
6957 "(default 5000) for a previously requested halt",
6958 .usage = "[milliseconds]",
6961 .name = "halt",
6962 .handler = handle_halt_command,
6963 .mode = COMMAND_EXEC,
6964 .help = "request target to halt, then wait up to the specified "
6965 "number of milliseconds (default 5000) for it to complete",
6966 .usage = "[milliseconds]",
6969 .name = "resume",
6970 .handler = handle_resume_command,
6971 .mode = COMMAND_EXEC,
6972 .help = "resume target execution from current PC or address",
6973 .usage = "[address]",
6976 .name = "reset",
6977 .handler = handle_reset_command,
6978 .mode = COMMAND_EXEC,
6979 .usage = "[run|halt|init]",
6980 .help = "Reset all targets into the specified mode. "
6981 "Default reset mode is run, if not given.",
6984 .name = "soft_reset_halt",
6985 .handler = handle_soft_reset_halt_command,
6986 .mode = COMMAND_EXEC,
6987 .usage = "",
6988 .help = "halt the target and do a soft reset",
6991 .name = "step",
6992 .handler = handle_step_command,
6993 .mode = COMMAND_EXEC,
6994 .help = "step one instruction from current PC or address",
6995 .usage = "[address]",
6998 .name = "mdd",
6999 .handler = handle_md_command,
7000 .mode = COMMAND_EXEC,
7001 .help = "display memory double-words",
7002 .usage = "['phys'] address [count]",
7005 .name = "mdw",
7006 .handler = handle_md_command,
7007 .mode = COMMAND_EXEC,
7008 .help = "display memory words",
7009 .usage = "['phys'] address [count]",
7012 .name = "mdh",
7013 .handler = handle_md_command,
7014 .mode = COMMAND_EXEC,
7015 .help = "display memory half-words",
7016 .usage = "['phys'] address [count]",
7019 .name = "mdb",
7020 .handler = handle_md_command,
7021 .mode = COMMAND_EXEC,
7022 .help = "display memory bytes",
7023 .usage = "['phys'] address [count]",
7026 .name = "mwd",
7027 .handler = handle_mw_command,
7028 .mode = COMMAND_EXEC,
7029 .help = "write memory double-word",
7030 .usage = "['phys'] address value [count]",
7033 .name = "mww",
7034 .handler = handle_mw_command,
7035 .mode = COMMAND_EXEC,
7036 .help = "write memory word",
7037 .usage = "['phys'] address value [count]",
7040 .name = "mwh",
7041 .handler = handle_mw_command,
7042 .mode = COMMAND_EXEC,
7043 .help = "write memory half-word",
7044 .usage = "['phys'] address value [count]",
7047 .name = "mwb",
7048 .handler = handle_mw_command,
7049 .mode = COMMAND_EXEC,
7050 .help = "write memory byte",
7051 .usage = "['phys'] address value [count]",
7054 .name = "bp",
7055 .handler = handle_bp_command,
7056 .mode = COMMAND_EXEC,
7057 .help = "list or set hardware or software breakpoint",
7058 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7061 .name = "rbp",
7062 .handler = handle_rbp_command,
7063 .mode = COMMAND_EXEC,
7064 .help = "remove breakpoint",
7065 .usage = "'all' | address",
7068 .name = "wp",
7069 .handler = handle_wp_command,
7070 .mode = COMMAND_EXEC,
7071 .help = "list (no params) or create watchpoints",
7072 .usage = "[address length [('r'|'w'|'a') [value [mask]]]]",
7075 .name = "rwp",
7076 .handler = handle_rwp_command,
7077 .mode = COMMAND_EXEC,
7078 .help = "remove watchpoint",
7079 .usage = "'all' | address",
7082 .name = "load_image",
7083 .handler = handle_load_image_command,
7084 .mode = COMMAND_EXEC,
7085 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7086 "[min_address] [max_length]",
7089 .name = "dump_image",
7090 .handler = handle_dump_image_command,
7091 .mode = COMMAND_EXEC,
7092 .usage = "filename address size",
7095 .name = "verify_image_checksum",
7096 .handler = handle_verify_image_checksum_command,
7097 .mode = COMMAND_EXEC,
7098 .usage = "filename [offset [type]]",
7101 .name = "verify_image",
7102 .handler = handle_verify_image_command,
7103 .mode = COMMAND_EXEC,
7104 .usage = "filename [offset [type]]",
7107 .name = "test_image",
7108 .handler = handle_test_image_command,
7109 .mode = COMMAND_EXEC,
7110 .usage = "filename [offset [type]]",
7113 .name = "get_reg",
7114 .mode = COMMAND_EXEC,
7115 .jim_handler = target_jim_get_reg,
7116 .help = "Get register values from the target",
7117 .usage = "list",
7120 .name = "set_reg",
7121 .mode = COMMAND_EXEC,
7122 .jim_handler = target_jim_set_reg,
7123 .help = "Set target register values",
7124 .usage = "dict",
7127 .name = "read_memory",
7128 .mode = COMMAND_EXEC,
7129 .handler = handle_target_read_memory,
7130 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7131 .usage = "address width count ['phys']",
7134 .name = "write_memory",
7135 .mode = COMMAND_EXEC,
7136 .jim_handler = target_jim_write_memory,
7137 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7138 .usage = "address width data ['phys']",
7141 .name = "reset_nag",
7142 .handler = handle_target_reset_nag,
7143 .mode = COMMAND_ANY,
7144 .help = "Nag after each reset about options that could have been "
7145 "enabled to improve performance.",
7146 .usage = "['enable'|'disable']",
7149 .name = "ps",
7150 .handler = handle_ps_command,
7151 .mode = COMMAND_EXEC,
7152 .help = "list all tasks",
7153 .usage = "",
7156 .name = "test_mem_access",
7157 .handler = handle_test_mem_access_command,
7158 .mode = COMMAND_EXEC,
7159 .help = "Test the target's memory access functions",
7160 .usage = "size",
7163 COMMAND_REGISTRATION_DONE
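/*
 * Register the run-time target commands: the target_request and trace
 * command groups plus the exec-mode handlers in the table above.
 */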
7165 static int target_register_user_commands(struct command_context *cmd_ctx)
7167 int retval = ERROR_OK;
7168 retval = target_request_register_commands(cmd_ctx);
7169 if (retval != ERROR_OK)
7170 return retval;
7172 retval = trace_register_commands(cmd_ctx);
7173 if (retval != ERROR_OK)
7174 return retval;
7177 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
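/* Map an enum target_debug_reason value to a printable name. */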
7180 const char *target_debug_reason_str(enum target_debug_reason reason)
7182 switch (reason) {
7183 case DBG_REASON_DBGRQ:
7184 return "DBGRQ";
7185 case DBG_REASON_BREAKPOINT:
7186 return "BREAKPOINT";
7187 case DBG_REASON_WATCHPOINT:
7188 return "WATCHPOINT";
7189 case DBG_REASON_WPTANDBKPT:
7190 return "WPTANDBKPT";
7191 case DBG_REASON_SINGLESTEP:
7192 return "SINGLESTEP";
7193 case DBG_REASON_NOTHALTED:
7194 return "NOTHALTED";
7195 case DBG_REASON_EXIT:
7196 return "EXIT";
7197 case DBG_REASON_EXC_CATCH:
7198 return "EXC_CATCH";
7199 case DBG_REASON_UNDEFINED:
7200 return "UNDEFINED";
7201 default:
7202 return "UNKNOWN!";