openocd: trivial replace of jim-nvp with new nvp
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
52 /* default halt wait timeout (ms) */
53 #define DEFAULT_HALT_TIMEOUT 5000
55 static int target_read_buffer_default(struct target *target, target_addr_t address,
56 uint32_t count, uint8_t *buffer);
57 static int target_write_buffer_default(struct target *target, target_addr_t address,
58 uint32_t count, const uint8_t *buffer);
59 static int target_array2mem(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj * const *argv);
61 static int target_mem2array(Jim_Interp *interp, struct target *target,
62 int argc, Jim_Obj * const *argv);
63 static int target_register_user_commands(struct command_context *cmd_ctx);
64 static int target_get_gdb_fileio_info_default(struct target *target,
65 struct gdb_fileio_info *fileio_info);
66 static int target_gdb_fileio_end_default(struct target *target, int retcode,
67 int fileio_errno, bool ctrl_c);
69 /* targets */
70 extern struct target_type arm7tdmi_target;
71 extern struct target_type arm720t_target;
72 extern struct target_type arm9tdmi_target;
73 extern struct target_type arm920t_target;
74 extern struct target_type arm966e_target;
75 extern struct target_type arm946e_target;
76 extern struct target_type arm926ejs_target;
77 extern struct target_type fa526_target;
78 extern struct target_type feroceon_target;
79 extern struct target_type dragonite_target;
80 extern struct target_type xscale_target;
81 extern struct target_type xtensa_chip_target;
82 extern struct target_type cortexm_target;
83 extern struct target_type cortexa_target;
84 extern struct target_type aarch64_target;
85 extern struct target_type cortexr4_target;
86 extern struct target_type armv8r_target;
87 extern struct target_type arm11_target;
88 extern struct target_type ls1_sap_target;
89 extern struct target_type mips_m4k_target;
90 extern struct target_type mips_mips64_target;
91 extern struct target_type avr_target;
92 extern struct target_type dsp563xx_target;
93 extern struct target_type dsp5680xx_target;
94 extern struct target_type testee_target;
95 extern struct target_type avr32_ap7k_target;
96 extern struct target_type hla_target;
97 extern struct target_type esp32_target;
98 extern struct target_type esp32s2_target;
99 extern struct target_type esp32s3_target;
100 extern struct target_type or1k_target;
101 extern struct target_type quark_x10xx_target;
102 extern struct target_type quark_d20xx_target;
103 extern struct target_type stm8_target;
104 extern struct target_type riscv_target;
105 extern struct target_type mem_ap_target;
106 extern struct target_type esirisc_target;
107 extern struct target_type arcv2_target;
109 static struct target_type *target_types[] = {
110 &arm7tdmi_target,
111 &arm9tdmi_target,
112 &arm920t_target,
113 &arm720t_target,
114 &arm966e_target,
115 &arm946e_target,
116 &arm926ejs_target,
117 &fa526_target,
118 &feroceon_target,
119 &dragonite_target,
120 &xscale_target,
121 &xtensa_chip_target,
122 &cortexm_target,
123 &cortexa_target,
124 &cortexr4_target,
125 &arm11_target,
126 &ls1_sap_target,
127 &mips_m4k_target,
128 &avr_target,
129 &dsp563xx_target,
130 &dsp5680xx_target,
131 &testee_target,
132 &avr32_ap7k_target,
133 &hla_target,
134 &esp32_target,
135 &esp32s2_target,
136 &esp32s3_target,
137 &or1k_target,
138 &quark_x10xx_target,
139 &quark_d20xx_target,
140 &stm8_target,
141 &riscv_target,
142 &mem_ap_target,
143 &esirisc_target,
144 &arcv2_target,
145 &aarch64_target,
146 &armv8r_target,
147 &mips_mips64_target,
148 NULL,
151 struct target *all_targets;
152 static struct target_event_callback *target_event_callbacks;
153 static struct target_timer_callback *target_timer_callbacks;
154 static int64_t target_timer_next_event_value;
155 static LIST_HEAD(target_reset_callback_list);
156 static LIST_HEAD(target_trace_callback_list);
157 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
158 static LIST_HEAD(empty_smp_targets);
160 static const struct jim_nvp nvp_assert[] = {
161 { .name = "assert", NVP_ASSERT },
162 { .name = "deassert", NVP_DEASSERT },
163 { .name = "T", NVP_ASSERT },
164 { .name = "F", NVP_DEASSERT },
165 { .name = "t", NVP_ASSERT },
166 { .name = "f", NVP_DEASSERT },
167 { .name = NULL, .value = -1 }
170 static const struct nvp nvp_error_target[] = {
171 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
172 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
173 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
174 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
175 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
176 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
177 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
178 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
179 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
180 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
181 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
182 { .value = -1, .name = NULL }
185 static const char *target_strerror_safe(int err)
187 const struct nvp *n;
189 n = nvp_value2name(nvp_error_target, err);
190 if (!n->name)
191 return "unknown";
192 else
193 return n->name;
196 static const struct jim_nvp nvp_target_event[] = {
198 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
199 { .value = TARGET_EVENT_HALTED, .name = "halted" },
200 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
201 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
202 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
203 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
204 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
207 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
210 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
211 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
212 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
213 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
215 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
216 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
219 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
220 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
223 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
226 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
237 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
238 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
239 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
240 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },
245 { .name = NULL, .value = -1 }
248 static const struct jim_nvp nvp_target_state[] = {
249 { .name = "unknown", .value = TARGET_UNKNOWN },
250 { .name = "running", .value = TARGET_RUNNING },
251 { .name = "halted", .value = TARGET_HALTED },
252 { .name = "reset", .value = TARGET_RESET },
253 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
254 { .name = NULL, .value = -1 },
257 static const struct nvp nvp_target_debug_reason[] = {
258 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
259 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
260 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
261 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
262 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
263 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
264 { .name = "program-exit", .value = DBG_REASON_EXIT },
265 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
266 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
267 { .name = NULL, .value = -1 },
270 static const struct jim_nvp nvp_target_endian[] = {
271 { .name = "big", .value = TARGET_BIG_ENDIAN },
272 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
273 { .name = "be", .value = TARGET_BIG_ENDIAN },
274 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
275 { .name = NULL, .value = -1 },
278 static const struct nvp nvp_reset_modes[] = {
279 { .name = "unknown", .value = RESET_UNKNOWN },
280 { .name = "run", .value = RESET_RUN },
281 { .name = "halt", .value = RESET_HALT },
282 { .name = "init", .value = RESET_INIT },
283 { .name = NULL, .value = -1 },
286 const char *debug_reason_name(struct target *t)
288 const char *cp;
290 cp = nvp_value2name(nvp_target_debug_reason,
291 t->debug_reason)->name;
292 if (!cp) {
293 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
294 cp = "(*BUG*unknown*BUG*)";
296 return cp;
299 const char *target_state_name(struct target *t)
301 const char *cp;
302 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
303 if (!cp) {
304 LOG_ERROR("Invalid target state: %d", (int)(t->state));
305 cp = "(*BUG*unknown*BUG*)";
308 if (!target_was_examined(t) && t->defer_examine)
309 cp = "examine deferred";
311 return cp;
314 const char *target_event_name(enum target_event event)
316 const char *cp;
317 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
318 if (!cp) {
319 LOG_ERROR("Invalid target event: %d", (int)(event));
320 cp = "(*BUG*unknown*BUG*)";
322 return cp;
325 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
327 const char *cp;
328 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
329 if (!cp) {
330 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
331 cp = "(*BUG*unknown*BUG*)";
333 return cp;
336 /* determine the number of the new target */
337 static int new_target_number(void)
339 struct target *t;
340 int x;
342 /* number is 0 based */
343 x = -1;
344 t = all_targets;
345 while (t) {
346 if (x < t->target_number)
347 x = t->target_number;
348 t = t->next;
350 return x + 1;
353 static void append_to_list_all_targets(struct target *target)
355 struct target **t = &all_targets;
357 while (*t)
358 t = &((*t)->next);
359 *t = target;
362 /* read a uint64_t from a buffer in target memory endianness */
363 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
365 if (target->endianness == TARGET_LITTLE_ENDIAN)
366 return le_to_h_u64(buffer);
367 else
368 return be_to_h_u64(buffer);
371 /* read a uint32_t from a buffer in target memory endianness */
372 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
374 if (target->endianness == TARGET_LITTLE_ENDIAN)
375 return le_to_h_u32(buffer);
376 else
377 return be_to_h_u32(buffer);
380 /* read a uint24_t from a buffer in target memory endianness */
381 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
383 if (target->endianness == TARGET_LITTLE_ENDIAN)
384 return le_to_h_u24(buffer);
385 else
386 return be_to_h_u24(buffer);
389 /* read a uint16_t from a buffer in target memory endianness */
390 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
392 if (target->endianness == TARGET_LITTLE_ENDIAN)
393 return le_to_h_u16(buffer);
394 else
395 return be_to_h_u16(buffer);
398 /* write a uint64_t to a buffer in target memory endianness */
399 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
401 if (target->endianness == TARGET_LITTLE_ENDIAN)
402 h_u64_to_le(buffer, value);
403 else
404 h_u64_to_be(buffer, value);
407 /* write a uint32_t to a buffer in target memory endianness */
408 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
410 if (target->endianness == TARGET_LITTLE_ENDIAN)
411 h_u32_to_le(buffer, value);
412 else
413 h_u32_to_be(buffer, value);
416 /* write a uint24_t to a buffer in target memory endianness */
417 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
419 if (target->endianness == TARGET_LITTLE_ENDIAN)
420 h_u24_to_le(buffer, value);
421 else
422 h_u24_to_be(buffer, value);
425 /* write a uint16_t to a buffer in target memory endianness */
426 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
428 if (target->endianness == TARGET_LITTLE_ENDIAN)
429 h_u16_to_le(buffer, value);
430 else
431 h_u16_to_be(buffer, value);
434 /* write a uint8_t to a buffer in target memory endianness */
435 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
437 *buffer = value;
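/* Hedged usage sketch, not part of the original file: the helpers above
 * translate between host-order integers and raw bytes laid out in the
 * target's endianness. A caller that has fetched four raw bytes from
 * target memory could decode them, modify the value, and re-encode it
 * for writing back like this (the function name is illustrative only). */
static inline uint32_t example_bump_word(struct target *target, uint8_t raw[4])
{
	/* interpret the bytes according to target->endianness */
	uint32_t value = target_buffer_get_u32(target, raw);
	/* store value + 1 back into the same buffer, in the same byte order */
	target_buffer_set_u32(target, raw, value + 1);
	return value;
}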
440 /* write a uint64_t array to a buffer in target memory endianness */
441 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
448 /* write a uint32_t array to a buffer in target memory endianness */
449 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
456 /* write a uint16_t array to a buffer in target memory endianness */
457 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
464 /* write a uint64_t array to a buffer in target memory endianness */
465 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
472 /* write a uint32_t array to a buffer in target memory endianness */
473 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
480 /* write a uint16_t array to a buffer in target memory endianness */
481 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
483 uint32_t i;
484 for (i = 0; i < count; i++)
485 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
488 /* return a pointer to a configured target; id is name or number */
489 struct target *get_target(const char *id)
491 struct target *target;
493 /* try as tcl target name */
494 for (target = all_targets; target; target = target->next) {
495 if (!target_name(target))
496 continue;
497 if (strcmp(id, target_name(target)) == 0)
498 return target;
501 /* It's OK to remove this fallback sometime after August 2010 or so */
503 /* no match, try as number */
504 unsigned num;
505 if (parse_uint(id, &num) != ERROR_OK)
506 return NULL;
508 for (target = all_targets; target; target = target->next) {
509 if (target->target_number == (int)num) {
510 LOG_WARNING("use '%s' as target identifier, not '%u'",
511 target_name(target), num);
512 return target;
516 return NULL;
519 /* returns a pointer to the n-th configured target */
520 struct target *get_target_by_num(int num)
522 struct target *target = all_targets;
524 while (target) {
525 if (target->target_number == num)
526 return target;
527 target = target->next;
530 return NULL;
533 struct target *get_current_target(struct command_context *cmd_ctx)
535 struct target *target = get_current_target_or_null(cmd_ctx);
537 if (!target) {
538 LOG_ERROR("BUG: current_target out of bounds");
539 exit(-1);
542 return target;
545 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
547 return cmd_ctx->current_target_override
548 ? cmd_ctx->current_target_override
549 : cmd_ctx->current_target;
552 int target_poll(struct target *target)
554 int retval;
556 /* We can't poll until after examine */
557 if (!target_was_examined(target)) {
558 /* Fail silently lest we pollute the log */
559 return ERROR_FAIL;
562 retval = target->type->poll(target);
563 if (retval != ERROR_OK)
564 return retval;
566 if (target->halt_issued) {
567 if (target->state == TARGET_HALTED)
568 target->halt_issued = false;
569 else {
570 int64_t t = timeval_ms() - target->halt_issued_time;
571 if (t > DEFAULT_HALT_TIMEOUT) {
572 target->halt_issued = false;
573 LOG_INFO("Halt timed out, wake up GDB.");
574 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
579 return ERROR_OK;
582 int target_halt(struct target *target)
584 int retval;
585 /* We can't poll until after examine */
586 if (!target_was_examined(target)) {
587 LOG_ERROR("Target not examined yet");
588 return ERROR_FAIL;
591 retval = target->type->halt(target);
592 if (retval != ERROR_OK)
593 return retval;
595 target->halt_issued = true;
596 target->halt_issued_time = timeval_ms();
598 return ERROR_OK;
602 * Make the target (re)start executing using its saved execution
603 * context (possibly with some modifications).
605 * @param target Which target should start executing.
606 * @param current True to use the target's saved program counter instead
607 * of the address parameter
608 * @param address Optionally used as the program counter.
609 * @param handle_breakpoints True iff breakpoints at the resumption PC
610 * should be skipped. (For example, maybe execution was stopped by
611 * such a breakpoint, in which case it would be counterproductive to
612 * let it re-trigger.)
613 * @param debug_execution False if all working areas allocated by OpenOCD
614 * should be released and/or restored to their original contents.
615 * (This would for example be true to run some downloaded "helper"
616 * algorithm code, which resides in one such working buffer and uses
617 * another for data storage.)
619 * @todo Resolve the ambiguity about what the "debug_execution" flag
620 * signifies. For example, Target implementations don't agree on how
621 * it relates to invalidation of the register cache, or to whether
622 * breakpoints and watchpoints should be enabled. (It would seem wrong
623 * to enable breakpoints when running downloaded "helper" algorithms
624 * (debug_execution true), since the breakpoints would be set to match
625 * target firmware being debugged, not the helper algorithm.... and
626 * enabling them could cause such helpers to malfunction (for example,
627 * by overwriting data with a breakpoint instruction). On the other
628 * hand the infrastructure for running such helpers might use this
629 * procedure but rely on hardware breakpoint to detect termination.)
631 int target_resume(struct target *target, int current, target_addr_t address,
632 int handle_breakpoints, int debug_execution)
634 int retval;
636 /* We can't poll until after examine */
637 if (!target_was_examined(target)) {
638 LOG_ERROR("Target not examined yet");
639 return ERROR_FAIL;
642 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
644 /* note that resume *must* be asynchronous. The CPU can halt before
645 * we poll. The CPU can even halt at the current PC as a result of
646 * a software breakpoint being inserted by (a bug?) the application.
649 * resume() triggers the event 'resumed'. The execution of TCL commands
650 * in the event handler causes the polling of targets. If the target has
651 * already halted for a breakpoint, polling will run the 'halted' event
652 * handler before the pending 'resumed' handler.
653 * Disable polling during resume() to guarantee the execution of handlers
654 * in the correct order.
656 bool save_poll_mask = jtag_poll_mask();
657 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
658 jtag_poll_unmask(save_poll_mask);
660 if (retval != ERROR_OK)
661 return retval;
663 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
665 return retval;
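/* Hedged usage sketch, not part of the original file: resuming a halted
 * target at its saved program counter, following the parameter description
 * in the comment above (the function name is illustrative only). */
static inline int example_resume_at_saved_pc(struct target *target)
{
	/* current = 1: use the saved PC, the address argument is ignored;
	 * handle_breakpoints = 1: skip a breakpoint sitting at the resume PC;
	 * debug_execution = 0: a normal resume, not a "helper algorithm" run */
	return target_resume(target, 1, 0, 1, 0);
}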
668 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
670 char buf[100];
671 int retval;
672 const struct nvp *n;
673 n = nvp_value2name(nvp_reset_modes, reset_mode);
674 if (!n->name) {
675 LOG_ERROR("invalid reset mode");
676 return ERROR_FAIL;
679 struct target *target;
680 for (target = all_targets; target; target = target->next)
681 target_call_reset_callbacks(target, reset_mode);
683 /* disable polling during reset to make reset event scripts
684 * more predictable, i.e. dr/irscan & pathmove in events will
685 * not have JTAG operations injected into the middle of a sequence.
687 bool save_poll_mask = jtag_poll_mask();
689 sprintf(buf, "ocd_process_reset %s", n->name);
690 retval = Jim_Eval(cmd->ctx->interp, buf);
692 jtag_poll_unmask(save_poll_mask);
694 if (retval != JIM_OK) {
695 Jim_MakeErrorMessage(cmd->ctx->interp);
696 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
697 return ERROR_FAIL;
700 /* We want any events to be processed before the prompt */
701 retval = target_call_timer_callbacks_now();
703 for (target = all_targets; target; target = target->next) {
704 target->type->check_reset(target);
705 target->running_alg = false;
708 return retval;
711 static int identity_virt2phys(struct target *target,
712 target_addr_t virtual, target_addr_t *physical)
714 *physical = virtual;
715 return ERROR_OK;
718 static int no_mmu(struct target *target, int *enabled)
720 *enabled = 0;
721 return ERROR_OK;
725 * Reset the @c examined flag for the given target.
726 * Pure paranoia -- targets are zeroed on allocation.
728 static inline void target_reset_examined(struct target *target)
730 target->examined = false;
733 static int default_examine(struct target *target)
735 target_set_examined(target);
736 return ERROR_OK;
739 /* no check by default */
740 static int default_check_reset(struct target *target)
742 return ERROR_OK;
745 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
746 * Keep in sync */
747 int target_examine_one(struct target *target)
749 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
751 int retval = target->type->examine(target);
752 if (retval != ERROR_OK) {
753 target_reset_examined(target);
754 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
755 return retval;
758 target_set_examined(target);
759 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
761 return ERROR_OK;
764 static int jtag_enable_callback(enum jtag_event event, void *priv)
766 struct target *target = priv;
768 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
769 return ERROR_OK;
771 jtag_unregister_event_callback(jtag_enable_callback, target);
773 return target_examine_one(target);
776 /* Targets that correctly implement init + examine, i.e.
777 * no communication with target during init:
779 * XScale
781 int target_examine(void)
783 int retval = ERROR_OK;
784 struct target *target;
786 for (target = all_targets; target; target = target->next) {
787 /* defer examination, but don't skip it */
788 if (!target->tap->enabled) {
789 jtag_register_event_callback(jtag_enable_callback,
790 target);
791 continue;
794 if (target->defer_examine)
795 continue;
797 int retval2 = target_examine_one(target);
798 if (retval2 != ERROR_OK) {
799 LOG_WARNING("target %s examination failed", target_name(target));
800 retval = retval2;
803 return retval;
806 const char *target_type_name(struct target *target)
808 return target->type->name;
811 static int target_soft_reset_halt(struct target *target)
813 if (!target_was_examined(target)) {
814 LOG_ERROR("Target not examined yet");
815 return ERROR_FAIL;
817 if (!target->type->soft_reset_halt) {
818 LOG_ERROR("Target %s does not support soft_reset_halt",
819 target_name(target));
820 return ERROR_FAIL;
822 return target->type->soft_reset_halt(target);
826 * Downloads a target-specific native code algorithm to the target,
827 * and executes it. Note that some targets may need to set up, enable,
828 * and tear down a breakpoint (hard or soft) to detect algorithm
829 * termination, while others may support lower overhead schemes where
830 * soft breakpoints embedded in the algorithm automatically terminate the
831 * algorithm.
833 * @param target used to run the algorithm
834 * @param num_mem_params
835 * @param mem_params
836 * @param num_reg_params
837 * @param reg_param
838 * @param entry_point
839 * @param exit_point
840 * @param timeout_ms
841 * @param arch_info target-specific description of the algorithm.
843 int target_run_algorithm(struct target *target,
844 int num_mem_params, struct mem_param *mem_params,
845 int num_reg_params, struct reg_param *reg_param,
846 target_addr_t entry_point, target_addr_t exit_point,
847 int timeout_ms, void *arch_info)
849 int retval = ERROR_FAIL;
851 if (!target_was_examined(target)) {
852 LOG_ERROR("Target not examined yet");
853 goto done;
855 if (!target->type->run_algorithm) {
856 LOG_ERROR("Target type '%s' does not support %s",
857 target_type_name(target), __func__);
858 goto done;
861 target->running_alg = true;
862 retval = target->type->run_algorithm(target,
863 num_mem_params, mem_params,
864 num_reg_params, reg_param,
865 entry_point, exit_point, timeout_ms, arch_info);
866 target->running_alg = false;
868 done:
869 return retval;
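/*
 * Hedged usage sketch, not part of the original file. A typical caller
 * (for instance a flash driver) prepares register parameters with the
 * helpers declared in src/target/algorithm.h and then runs a routine it
 * has already downloaded to the target, roughly along these lines
 * (the entry/exit addresses and the "r0" register name are hypothetical):
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, argument);
 *	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			entry_point, exit_point, 1000, NULL);
 *	uint32_t result = buf_get_u32(reg_params[0].value, 0, 32);
 *	destroy_reg_param(&reg_params[0]);
 */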
873 * Executes a target-specific native code algorithm and leaves it running.
875 * @param target used to run the algorithm
876 * @param num_mem_params
877 * @param mem_params
878 * @param num_reg_params
879 * @param reg_params
880 * @param entry_point
881 * @param exit_point
882 * @param arch_info target-specific description of the algorithm.
884 int target_start_algorithm(struct target *target,
885 int num_mem_params, struct mem_param *mem_params,
886 int num_reg_params, struct reg_param *reg_params,
887 target_addr_t entry_point, target_addr_t exit_point,
888 void *arch_info)
890 int retval = ERROR_FAIL;
892 if (!target_was_examined(target)) {
893 LOG_ERROR("Target not examined yet");
894 goto done;
896 if (!target->type->start_algorithm) {
897 LOG_ERROR("Target type '%s' does not support %s",
898 target_type_name(target), __func__);
899 goto done;
901 if (target->running_alg) {
902 LOG_ERROR("Target is already running an algorithm");
903 goto done;
906 target->running_alg = true;
907 retval = target->type->start_algorithm(target,
908 num_mem_params, mem_params,
909 num_reg_params, reg_params,
910 entry_point, exit_point, arch_info);
912 done:
913 return retval;
917 * Waits for an algorithm started with target_start_algorithm() to complete.
919 * @param target used to run the algorithm
920 * @param num_mem_params
921 * @param mem_params
922 * @param num_reg_params
923 * @param reg_params
924 * @param exit_point
925 * @param timeout_ms
926 * @param arch_info target-specific description of the algorithm.
928 int target_wait_algorithm(struct target *target,
929 int num_mem_params, struct mem_param *mem_params,
930 int num_reg_params, struct reg_param *reg_params,
931 target_addr_t exit_point, int timeout_ms,
932 void *arch_info)
934 int retval = ERROR_FAIL;
936 if (!target->type->wait_algorithm) {
937 LOG_ERROR("Target type '%s' does not support %s",
938 target_type_name(target), __func__);
939 goto done;
941 if (!target->running_alg) {
942 LOG_ERROR("Target is not running an algorithm");
943 goto done;
946 retval = target->type->wait_algorithm(target,
947 num_mem_params, mem_params,
948 num_reg_params, reg_params,
949 exit_point, timeout_ms, arch_info);
950 if (retval != ERROR_TARGET_TIMEOUT)
951 target->running_alg = false;
953 done:
954 return retval;
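/* Hedged usage sketch, not part of the original file: the start/wait pair
 * lets the host do other work (for example, refilling a buffer) while the
 * downloaded routine runs. Parameters are reduced to the bare minimum and
 * the entry/exit addresses come from the caller. */
static inline int example_overlapped_run(struct target *target,
	target_addr_t entry_point, target_addr_t exit_point)
{
	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
			entry_point, exit_point, NULL);
	if (retval != ERROR_OK)
		return retval;

	/* ... host-side work could overlap with target execution here ... */

	return target_wait_algorithm(target, 0, NULL, 0, NULL,
			exit_point, 1000, NULL);
}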
958 * Streams data to a circular buffer on target intended for consumption by code
959 * running asynchronously on target.
961 * This is intended for applications where target-specific native code runs
962 * on the target, receives data from the circular buffer, does something with
963 * it (most likely writing it to a flash memory), and advances the circular
964 * buffer pointer.
966 * This assumes that the helper algorithm has already been loaded to the target,
967 * but has not been started yet. Given memory and register parameters are passed
968 * to the algorithm.
970 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
971 * following format:
973 * [buffer_start + 0, buffer_start + 4):
974 * Write Pointer address (aka head). Written and updated by this
975 * routine when new data is written to the circular buffer.
976 * [buffer_start + 4, buffer_start + 8):
977 * Read Pointer address (aka tail). Updated by code running on the
978 * target after it consumes data.
979 * [buffer_start + 8, buffer_start + buffer_size):
980 * Circular buffer contents.
982 * See contrib/loaders/flash/stm32f1x.S for an example.
984 * @param target used to run the algorithm
985 * @param buffer address on the host where data to be sent is located
986 * @param count number of blocks to send
987 * @param block_size size in bytes of each block
988 * @param num_mem_params count of memory-based params to pass to algorithm
989 * @param mem_params memory-based params to pass to algorithm
990 * @param num_reg_params count of register-based params to pass to algorithm
991 * @param reg_params register-based params to pass to algorithm
992 * @param buffer_start address on the target of the circular buffer structure
993 * @param buffer_size size of the circular buffer structure
994 * @param entry_point address on the target to execute to start the algorithm
995 * @param exit_point address at which to set a breakpoint to catch the
996 * end of the algorithm; can be 0 if target triggers a breakpoint itself
997 * @param arch_info
1000 int target_run_flash_async_algorithm(struct target *target,
1001 const uint8_t *buffer, uint32_t count, int block_size,
1002 int num_mem_params, struct mem_param *mem_params,
1003 int num_reg_params, struct reg_param *reg_params,
1004 uint32_t buffer_start, uint32_t buffer_size,
1005 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1007 int retval;
1008 int timeout = 0;
1010 const uint8_t *buffer_orig = buffer;
1012 /* Set up working area. First word is write pointer, second word is read pointer,
1013 * rest is fifo data area. */
1014 uint32_t wp_addr = buffer_start;
1015 uint32_t rp_addr = buffer_start + 4;
1016 uint32_t fifo_start_addr = buffer_start + 8;
1017 uint32_t fifo_end_addr = buffer_start + buffer_size;
1019 uint32_t wp = fifo_start_addr;
1020 uint32_t rp = fifo_start_addr;
1022 /* validate block_size is 2^n */
1023 assert(IS_PWR_OF_2(block_size));
1025 retval = target_write_u32(target, wp_addr, wp);
1026 if (retval != ERROR_OK)
1027 return retval;
1028 retval = target_write_u32(target, rp_addr, rp);
1029 if (retval != ERROR_OK)
1030 return retval;
1032 /* Start up algorithm on target and let it idle while writing the first chunk */
1033 retval = target_start_algorithm(target, num_mem_params, mem_params,
1034 num_reg_params, reg_params,
1035 entry_point,
1036 exit_point,
1037 arch_info);
1039 if (retval != ERROR_OK) {
1040 LOG_ERROR("error starting target flash write algorithm");
1041 return retval;
1044 while (count > 0) {
1046 retval = target_read_u32(target, rp_addr, &rp);
1047 if (retval != ERROR_OK) {
1048 LOG_ERROR("failed to get read pointer");
1049 break;
1052 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1053 (size_t) (buffer - buffer_orig), count, wp, rp);
1055 if (rp == 0) {
1056 LOG_ERROR("flash write algorithm aborted by target");
1057 retval = ERROR_FLASH_OPERATION_FAILED;
1058 break;
1061 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1062 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1063 break;
1066 /* Count the number of bytes available in the fifo without
1067 * crossing the wrap around. Make sure to not fill it completely,
1068 * because that would make wp == rp and that's the empty condition. */
1069 uint32_t thisrun_bytes;
1070 if (rp > wp)
1071 thisrun_bytes = rp - wp - block_size;
1072 else if (rp > fifo_start_addr)
1073 thisrun_bytes = fifo_end_addr - wp;
1074 else
1075 thisrun_bytes = fifo_end_addr - wp - block_size;
1077 if (thisrun_bytes == 0) {
1078 /* Throttle polling a bit if transfer is (much) faster than flash
1079 * programming. The exact delay shouldn't matter as long as it's
1080 * less than buffer size / flash speed. This is very unlikely to
1081 * run when using high latency connections such as USB. */
1082 alive_sleep(2);
1084 /* to stop an infinite loop on some targets check and increment a timeout
1085 * this issue was observed on a stellaris using the new ICDI interface */
1086 if (timeout++ >= 2500) {
1087 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1088 return ERROR_FLASH_OPERATION_FAILED;
1090 continue;
1093 /* reset our timeout */
1094 timeout = 0;
1096 /* Limit to the amount of data we actually want to write */
1097 if (thisrun_bytes > count * block_size)
1098 thisrun_bytes = count * block_size;
1100 /* Force end of large blocks to be word aligned */
1101 if (thisrun_bytes >= 16)
1102 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1104 /* Write data to fifo */
1105 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1106 if (retval != ERROR_OK)
1107 break;
1109 /* Update counters and wrap write pointer */
1110 buffer += thisrun_bytes;
1111 count -= thisrun_bytes / block_size;
1112 wp += thisrun_bytes;
1113 if (wp >= fifo_end_addr)
1114 wp = fifo_start_addr;
1116 /* Store updated write pointer to target */
1117 retval = target_write_u32(target, wp_addr, wp);
1118 if (retval != ERROR_OK)
1119 break;
1121 /* Avoid GDB timeouts */
1122 keep_alive();
1125 if (retval != ERROR_OK) {
1126 /* abort flash write algorithm on target */
1127 target_write_u32(target, wp_addr, 0);
1130 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1131 num_reg_params, reg_params,
1132 exit_point,
1133 10000,
1134 arch_info);
1136 if (retval2 != ERROR_OK) {
1137 LOG_ERROR("error waiting for target flash write algorithm");
1138 retval = retval2;
1141 if (retval == ERROR_OK) {
1142 /* check if algorithm set rp = 0 after fifo writer loop finished */
1143 retval = target_read_u32(target, rp_addr, &rp);
1144 if (retval == ERROR_OK && rp == 0) {
1145 LOG_ERROR("flash write algorithm aborted by target");
1146 retval = ERROR_FLASH_OPERATION_FAILED;
1150 return retval;
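/*
 * Hedged worked example, not part of the original file, illustrating the
 * circular buffer layout documented above with hypothetical numbers:
 * buffer_start = 0x20000000 and buffer_size = 0x1000 yield
 *
 *	wp_addr         = 0x20000000   (head, published by the host)
 *	rp_addr         = 0x20000004   (tail, published by the target)
 *	fifo_start_addr = 0x20000008
 *	fifo_end_addr   = 0x20001000
 *
 * The FIFO is empty when wp == rp, so it must never be filled completely;
 * with block_size = 4 and rp == fifo_start_addr the host writes at most
 * fifo_end_addr - wp - block_size bytes before wrapping wp back to
 * fifo_start_addr. The target-side consumer (see
 * contrib/loaders/flash/stm32f1x.S) reads at rp, programs the data, then
 * advances and publishes rp; writing rp = 0 aborts the transfer.
 */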
1153 int target_run_read_async_algorithm(struct target *target,
1154 uint8_t *buffer, uint32_t count, int block_size,
1155 int num_mem_params, struct mem_param *mem_params,
1156 int num_reg_params, struct reg_param *reg_params,
1157 uint32_t buffer_start, uint32_t buffer_size,
1158 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1160 int retval;
1161 int timeout = 0;
1163 const uint8_t *buffer_orig = buffer;
1165 /* Set up working area. First word is write pointer, second word is read pointer,
1166 * rest is fifo data area. */
1167 uint32_t wp_addr = buffer_start;
1168 uint32_t rp_addr = buffer_start + 4;
1169 uint32_t fifo_start_addr = buffer_start + 8;
1170 uint32_t fifo_end_addr = buffer_start + buffer_size;
1172 uint32_t wp = fifo_start_addr;
1173 uint32_t rp = fifo_start_addr;
1175 /* validate block_size is 2^n */
1176 assert(IS_PWR_OF_2(block_size));
1178 retval = target_write_u32(target, wp_addr, wp);
1179 if (retval != ERROR_OK)
1180 return retval;
1181 retval = target_write_u32(target, rp_addr, rp);
1182 if (retval != ERROR_OK)
1183 return retval;
1185 /* Start up algorithm on target */
1186 retval = target_start_algorithm(target, num_mem_params, mem_params,
1187 num_reg_params, reg_params,
1188 entry_point,
1189 exit_point,
1190 arch_info);
1192 if (retval != ERROR_OK) {
1193 LOG_ERROR("error starting target flash read algorithm");
1194 return retval;
1197 while (count > 0) {
1198 retval = target_read_u32(target, wp_addr, &wp);
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("failed to get write pointer");
1201 break;
1204 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1205 (size_t)(buffer - buffer_orig), count, wp, rp);
1207 if (wp == 0) {
1208 LOG_ERROR("flash read algorithm aborted by target");
1209 retval = ERROR_FLASH_OPERATION_FAILED;
1210 break;
1213 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1214 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1215 break;
1218 /* Count the number of bytes available in the fifo without
1219 * crossing the wrap around. */
1220 uint32_t thisrun_bytes;
1221 if (wp >= rp)
1222 thisrun_bytes = wp - rp;
1223 else
1224 thisrun_bytes = fifo_end_addr - rp;
1226 if (thisrun_bytes == 0) {
1227 /* Throttle polling a bit if transfer is (much) faster than flash
1228 * reading. The exact delay shouldn't matter as long as it's
1229 * less than buffer size / flash speed. This is very unlikely to
1230 * run when using high latency connections such as USB. */
1231 alive_sleep(2);
1233 /* to stop an infinite loop on some targets check and increment a timeout
1234 * this issue was observed on a stellaris using the new ICDI interface */
1235 if (timeout++ >= 2500) {
1236 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1237 return ERROR_FLASH_OPERATION_FAILED;
1239 continue;
1242 /* Reset our timeout */
1243 timeout = 0;
1245 /* Limit to the amount of data we actually want to read */
1246 if (thisrun_bytes > count * block_size)
1247 thisrun_bytes = count * block_size;
1249 /* Force end of large blocks to be word aligned */
1250 if (thisrun_bytes >= 16)
1251 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1253 /* Read data from fifo */
1254 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1255 if (retval != ERROR_OK)
1256 break;
1258 /* Update counters and wrap write pointer */
1259 buffer += thisrun_bytes;
1260 count -= thisrun_bytes / block_size;
1261 rp += thisrun_bytes;
1262 if (rp >= fifo_end_addr)
1263 rp = fifo_start_addr;
1265 /* Store updated write pointer to target */
1266 retval = target_write_u32(target, rp_addr, rp);
1267 if (retval != ERROR_OK)
1268 break;
1270 /* Avoid GDB timeouts */
1271 keep_alive();
1275 if (retval != ERROR_OK) {
1276 /* abort flash read algorithm on target */
1277 target_write_u32(target, rp_addr, 0);
1280 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1281 num_reg_params, reg_params,
1282 exit_point,
1283 10000,
1284 arch_info);
1286 if (retval2 != ERROR_OK) {
1287 LOG_ERROR("error waiting for target flash write algorithm");
1288 retval = retval2;
1291 if (retval == ERROR_OK) {
1292 /* check if algorithm set wp = 0 after fifo writer loop finished */
1293 retval = target_read_u32(target, wp_addr, &wp);
1294 if (retval == ERROR_OK && wp == 0) {
1295 LOG_ERROR("flash read algorithm aborted by target");
1296 retval = ERROR_FLASH_OPERATION_FAILED;
1300 return retval;
1303 int target_read_memory(struct target *target,
1304 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1306 if (!target_was_examined(target)) {
1307 LOG_ERROR("Target not examined yet");
1308 return ERROR_FAIL;
1310 if (!target->type->read_memory) {
1311 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1312 return ERROR_FAIL;
1314 return target->type->read_memory(target, address, size, count, buffer);
1317 int target_read_phys_memory(struct target *target,
1318 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1320 if (!target_was_examined(target)) {
1321 LOG_ERROR("Target not examined yet");
1322 return ERROR_FAIL;
1324 if (!target->type->read_phys_memory) {
1325 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1326 return ERROR_FAIL;
1328 return target->type->read_phys_memory(target, address, size, count, buffer);
1331 int target_write_memory(struct target *target,
1332 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1334 if (!target_was_examined(target)) {
1335 LOG_ERROR("Target not examined yet");
1336 return ERROR_FAIL;
1338 if (!target->type->write_memory) {
1339 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1340 return ERROR_FAIL;
1342 return target->type->write_memory(target, address, size, count, buffer);
1345 int target_write_phys_memory(struct target *target,
1346 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1348 if (!target_was_examined(target)) {
1349 LOG_ERROR("Target not examined yet");
1350 return ERROR_FAIL;
1352 if (!target->type->write_phys_memory) {
1353 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1354 return ERROR_FAIL;
1356 return target->type->write_phys_memory(target, address, size, count, buffer);
1359 int target_add_breakpoint(struct target *target,
1360 struct breakpoint *breakpoint)
1362 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1363 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1364 return ERROR_TARGET_NOT_HALTED;
1366 return target->type->add_breakpoint(target, breakpoint);
1369 int target_add_context_breakpoint(struct target *target,
1370 struct breakpoint *breakpoint)
1372 if (target->state != TARGET_HALTED) {
1373 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1374 return ERROR_TARGET_NOT_HALTED;
1376 return target->type->add_context_breakpoint(target, breakpoint);
1379 int target_add_hybrid_breakpoint(struct target *target,
1380 struct breakpoint *breakpoint)
1382 if (target->state != TARGET_HALTED) {
1383 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1384 return ERROR_TARGET_NOT_HALTED;
1386 return target->type->add_hybrid_breakpoint(target, breakpoint);
1389 int target_remove_breakpoint(struct target *target,
1390 struct breakpoint *breakpoint)
1392 return target->type->remove_breakpoint(target, breakpoint);
1395 int target_add_watchpoint(struct target *target,
1396 struct watchpoint *watchpoint)
1398 if (target->state != TARGET_HALTED) {
1399 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1400 return ERROR_TARGET_NOT_HALTED;
1402 return target->type->add_watchpoint(target, watchpoint);
1404 int target_remove_watchpoint(struct target *target,
1405 struct watchpoint *watchpoint)
1407 return target->type->remove_watchpoint(target, watchpoint);
1409 int target_hit_watchpoint(struct target *target,
1410 struct watchpoint **hit_watchpoint)
1412 if (target->state != TARGET_HALTED) {
1413 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1414 return ERROR_TARGET_NOT_HALTED;
1417 if (!target->type->hit_watchpoint) {
1418 /* For backward compatibility, if hit_watchpoint is not implemented,
1419 * return ERROR_FAIL so that gdb_server does not act on nonsense
1420 * information. */
1421 return ERROR_FAIL;
1424 return target->type->hit_watchpoint(target, hit_watchpoint);
1427 const char *target_get_gdb_arch(struct target *target)
1429 if (!target->type->get_gdb_arch)
1430 return NULL;
1431 return target->type->get_gdb_arch(target);
1434 int target_get_gdb_reg_list(struct target *target,
1435 struct reg **reg_list[], int *reg_list_size,
1436 enum target_register_class reg_class)
1438 int result = ERROR_FAIL;
1440 if (!target_was_examined(target)) {
1441 LOG_ERROR("Target not examined yet");
1442 goto done;
1445 result = target->type->get_gdb_reg_list(target, reg_list,
1446 reg_list_size, reg_class);
1448 done:
1449 if (result != ERROR_OK) {
1450 *reg_list = NULL;
1451 *reg_list_size = 0;
1453 return result;
1456 int target_get_gdb_reg_list_noread(struct target *target,
1457 struct reg **reg_list[], int *reg_list_size,
1458 enum target_register_class reg_class)
1460 if (target->type->get_gdb_reg_list_noread &&
1461 target->type->get_gdb_reg_list_noread(target, reg_list,
1462 reg_list_size, reg_class) == ERROR_OK)
1463 return ERROR_OK;
1464 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1467 bool target_supports_gdb_connection(struct target *target)
1470 * exclude all the targets that don't provide get_gdb_reg_list
1471 * or that have explicit gdb_max_connections == 0
1473 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1476 int target_step(struct target *target,
1477 int current, target_addr_t address, int handle_breakpoints)
1479 int retval;
1481 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1483 retval = target->type->step(target, current, address, handle_breakpoints);
1484 if (retval != ERROR_OK)
1485 return retval;
1487 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1489 return retval;
1492 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1494 if (target->state != TARGET_HALTED) {
1495 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1496 return ERROR_TARGET_NOT_HALTED;
1498 return target->type->get_gdb_fileio_info(target, fileio_info);
1501 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1503 if (target->state != TARGET_HALTED) {
1504 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1505 return ERROR_TARGET_NOT_HALTED;
1507 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1510 target_addr_t target_address_max(struct target *target)
1512 unsigned bits = target_address_bits(target);
1513 if (sizeof(target_addr_t) * 8 == bits)
1514 return (target_addr_t) -1;
1515 else
1516 return (((target_addr_t) 1) << bits) - 1;
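/* Hedged note, not part of the original file, on target_address_max()
 * above: with bits = 32 the general formula gives
 * ((target_addr_t)1 << 32) - 1 = 0xffffffff, but when bits equals the
 * full width of target_addr_t (e.g. 64) that shift would be undefined
 * behaviour in C, hence the special case returning (target_addr_t)-1,
 * i.e. all address bits set. */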
1519 unsigned target_address_bits(struct target *target)
1521 if (target->type->address_bits)
1522 return target->type->address_bits(target);
1523 return 32;
1526 unsigned int target_data_bits(struct target *target)
1528 if (target->type->data_bits)
1529 return target->type->data_bits(target);
1530 return 32;
1533 static int target_profiling(struct target *target, uint32_t *samples,
1534 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1536 return target->type->profiling(target, samples, max_num_samples,
1537 num_samples, seconds);
1540 static int handle_target(void *priv);
1542 static int target_init_one(struct command_context *cmd_ctx,
1543 struct target *target)
1545 target_reset_examined(target);
1547 struct target_type *type = target->type;
1548 if (!type->examine)
1549 type->examine = default_examine;
1551 if (!type->check_reset)
1552 type->check_reset = default_check_reset;
1554 assert(type->init_target);
1556 int retval = type->init_target(cmd_ctx, target);
1557 if (retval != ERROR_OK) {
1558 LOG_ERROR("target '%s' init failed", target_name(target));
1559 return retval;
1562 /* Sanity-check MMU support ... stub in what we must, to help
1563 * implement it in stages, but warn if we need to do so.
1565 if (type->mmu) {
1566 if (!type->virt2phys) {
1567 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1568 type->virt2phys = identity_virt2phys;
1570 } else {
1571 /* Make sure no-MMU targets all behave the same: make no
1572 * distinction between physical and virtual addresses, and
1573 * ensure that virt2phys() is always an identity mapping.
1575 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1576 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1578 type->mmu = no_mmu;
1579 type->write_phys_memory = type->write_memory;
1580 type->read_phys_memory = type->read_memory;
1581 type->virt2phys = identity_virt2phys;
1584 if (!target->type->read_buffer)
1585 target->type->read_buffer = target_read_buffer_default;
1587 if (!target->type->write_buffer)
1588 target->type->write_buffer = target_write_buffer_default;
1590 if (!target->type->get_gdb_fileio_info)
1591 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1593 if (!target->type->gdb_fileio_end)
1594 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1596 if (!target->type->profiling)
1597 target->type->profiling = target_profiling_default;
1599 return ERROR_OK;
1602 static int target_init(struct command_context *cmd_ctx)
1604 struct target *target;
1605 int retval;
1607 for (target = all_targets; target; target = target->next) {
1608 retval = target_init_one(cmd_ctx, target);
1609 if (retval != ERROR_OK)
1610 return retval;
1613 if (!all_targets)
1614 return ERROR_OK;
1616 retval = target_register_user_commands(cmd_ctx);
1617 if (retval != ERROR_OK)
1618 return retval;
1620 retval = target_register_timer_callback(&handle_target,
1621 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1622 if (retval != ERROR_OK)
1623 return retval;
1625 return ERROR_OK;
1628 COMMAND_HANDLER(handle_target_init_command)
1630 int retval;
1632 if (CMD_ARGC != 0)
1633 return ERROR_COMMAND_SYNTAX_ERROR;
1635 static bool target_initialized;
1636 if (target_initialized) {
1637 LOG_INFO("'target init' has already been called");
1638 return ERROR_OK;
1640 target_initialized = true;
1642 retval = command_run_line(CMD_CTX, "init_targets");
1643 if (retval != ERROR_OK)
1644 return retval;
1646 retval = command_run_line(CMD_CTX, "init_target_events");
1647 if (retval != ERROR_OK)
1648 return retval;
1650 retval = command_run_line(CMD_CTX, "init_board");
1651 if (retval != ERROR_OK)
1652 return retval;
1654 LOG_DEBUG("Initializing targets...");
1655 return target_init(CMD_CTX);
1658 int target_register_event_callback(int (*callback)(struct target *target,
1659 enum target_event event, void *priv), void *priv)
1661 struct target_event_callback **callbacks_p = &target_event_callbacks;
1663 if (!callback)
1664 return ERROR_COMMAND_SYNTAX_ERROR;
1666 if (*callbacks_p) {
1667 while ((*callbacks_p)->next)
1668 callbacks_p = &((*callbacks_p)->next);
1669 callbacks_p = &((*callbacks_p)->next);
1672 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1673 (*callbacks_p)->callback = callback;
1674 (*callbacks_p)->priv = priv;
1675 (*callbacks_p)->next = NULL;
1677 return ERROR_OK;
1680 int target_register_reset_callback(int (*callback)(struct target *target,
1681 enum target_reset_mode reset_mode, void *priv), void *priv)
1683 struct target_reset_callback *entry;
1685 if (!callback)
1686 return ERROR_COMMAND_SYNTAX_ERROR;
1688 entry = malloc(sizeof(struct target_reset_callback));
1689 if (!entry) {
1690 LOG_ERROR("error allocating buffer for reset callback entry");
1691 return ERROR_COMMAND_SYNTAX_ERROR;
1694 entry->callback = callback;
1695 entry->priv = priv;
1696 list_add(&entry->list, &target_reset_callback_list);
1699 return ERROR_OK;
1702 int target_register_trace_callback(int (*callback)(struct target *target,
1703 size_t len, uint8_t *data, void *priv), void *priv)
1705 struct target_trace_callback *entry;
1707 if (!callback)
1708 return ERROR_COMMAND_SYNTAX_ERROR;
1710 entry = malloc(sizeof(struct target_trace_callback));
1711 if (!entry) {
1712 LOG_ERROR("error allocating buffer for trace callback entry");
1713 return ERROR_COMMAND_SYNTAX_ERROR;
1716 entry->callback = callback;
1717 entry->priv = priv;
1718 list_add(&entry->list, &target_trace_callback_list);
1721 return ERROR_OK;
1724 int target_register_timer_callback(int (*callback)(void *priv),
1725 unsigned int time_ms, enum target_timer_type type, void *priv)
1727 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1729 if (!callback)
1730 return ERROR_COMMAND_SYNTAX_ERROR;
1732 if (*callbacks_p) {
1733 while ((*callbacks_p)->next)
1734 callbacks_p = &((*callbacks_p)->next);
1735 callbacks_p = &((*callbacks_p)->next);
1738 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1739 (*callbacks_p)->callback = callback;
1740 (*callbacks_p)->type = type;
1741 (*callbacks_p)->time_ms = time_ms;
1742 (*callbacks_p)->removed = false;
1744 (*callbacks_p)->when = timeval_ms() + time_ms;
1745 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1747 (*callbacks_p)->priv = priv;
1748 (*callbacks_p)->next = NULL;
1750 return ERROR_OK;
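/* Hedged usage sketch, not part of the original file: registering a
 * periodic callback that fires roughly every 500 ms until it is removed
 * with target_unregister_timer_callback(). The callback function itself
 * is supplied by the caller and is hypothetical here. */
static inline int example_register_heartbeat(int (*heartbeat)(void *priv),
	void *priv)
{
	return target_register_timer_callback(heartbeat, 500,
			TARGET_TIMER_TYPE_PERIODIC, priv);
}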
1753 int target_unregister_event_callback(int (*callback)(struct target *target,
1754 enum target_event event, void *priv), void *priv)
1756 struct target_event_callback **p = &target_event_callbacks;
1757 struct target_event_callback *c = target_event_callbacks;
1759 if (!callback)
1760 return ERROR_COMMAND_SYNTAX_ERROR;
1762 while (c) {
1763 struct target_event_callback *next = c->next;
1764 if ((c->callback == callback) && (c->priv == priv)) {
1765 *p = next;
1766 free(c);
1767 return ERROR_OK;
1768 } else
1769 p = &(c->next);
1770 c = next;
1773 return ERROR_OK;
1776 int target_unregister_reset_callback(int (*callback)(struct target *target,
1777 enum target_reset_mode reset_mode, void *priv), void *priv)
1779 struct target_reset_callback *entry;
1781 if (!callback)
1782 return ERROR_COMMAND_SYNTAX_ERROR;
1784 list_for_each_entry(entry, &target_reset_callback_list, list) {
1785 if (entry->callback == callback && entry->priv == priv) {
1786 list_del(&entry->list);
1787 free(entry);
1788 break;
1792 return ERROR_OK;
1795 int target_unregister_trace_callback(int (*callback)(struct target *target,
1796 size_t len, uint8_t *data, void *priv), void *priv)
1798 struct target_trace_callback *entry;
1800 if (!callback)
1801 return ERROR_COMMAND_SYNTAX_ERROR;
1803 list_for_each_entry(entry, &target_trace_callback_list, list) {
1804 if (entry->callback == callback && entry->priv == priv) {
1805 list_del(&entry->list);
1806 free(entry);
1807 break;
1811 return ERROR_OK;
1814 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1816 if (!callback)
1817 return ERROR_COMMAND_SYNTAX_ERROR;
1819 for (struct target_timer_callback *c = target_timer_callbacks;
1820 c; c = c->next) {
1821 if ((c->callback == callback) && (c->priv == priv)) {
1822 c->removed = true;
1823 return ERROR_OK;
1827 return ERROR_FAIL;
1830 int target_call_event_callbacks(struct target *target, enum target_event event)
1832 struct target_event_callback *callback = target_event_callbacks;
1833 struct target_event_callback *next_callback;
1835 if (event == TARGET_EVENT_HALTED) {
1836 /* execute early halted first */
1837 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1840 LOG_DEBUG("target event %i (%s) for core %s", event,
1841 target_event_name(event),
1842 target_name(target));
1844 target_handle_event(target, event);
1846 while (callback) {
1847 next_callback = callback->next;
1848 callback->callback(target, event, callback->priv);
1849 callback = next_callback;
1852 return ERROR_OK;
1855 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1857 struct target_reset_callback *callback;
1859 LOG_DEBUG("target reset %i (%s)", reset_mode,
1860 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1862 list_for_each_entry(callback, &target_reset_callback_list, list)
1863 callback->callback(target, reset_mode, callback->priv);
1865 return ERROR_OK;
1868 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1870 struct target_trace_callback *callback;
1872 list_for_each_entry(callback, &target_trace_callback_list, list)
1873 callback->callback(target, len, data, callback->priv);
1875 return ERROR_OK;
1878 static int target_timer_callback_periodic_restart(
1879 struct target_timer_callback *cb, int64_t *now)
1881 cb->when = *now + cb->time_ms;
1882 return ERROR_OK;
1885 static int target_call_timer_callback(struct target_timer_callback *cb,
1886 int64_t *now)
1888 cb->callback(cb->priv);
1890 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1891 return target_timer_callback_periodic_restart(cb, now);
1893 return target_unregister_timer_callback(cb->callback, cb->priv);
1896 static int target_call_timer_callbacks_check_time(int checktime)
1898 static bool callback_processing;
1900 /* Do not allow nesting */
1901 if (callback_processing)
1902 return ERROR_OK;
1904 callback_processing = true;
1906 keep_alive();
1908 int64_t now = timeval_ms();
1910 /* Initialize to a default value that's well into the future.
1911 * The loop below will make it closer to now if there are
1912 * callbacks that want to be called sooner. */
1913 target_timer_next_event_value = now + 1000;
1915 /* Store an address of the place containing a pointer to the
1916 * next item; initially, that's a standalone "root of the
1917 * list" variable. */
1918 struct target_timer_callback **callback = &target_timer_callbacks;
1919 while (callback && *callback) {
1920 if ((*callback)->removed) {
1921 struct target_timer_callback *p = *callback;
1922 *callback = (*callback)->next;
1923 free(p);
1924 continue;
1927 bool call_it = (*callback)->callback &&
1928 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1929 now >= (*callback)->when);
1931 if (call_it)
1932 target_call_timer_callback(*callback, &now);
1934 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1935 target_timer_next_event_value = (*callback)->when;
1937 callback = &(*callback)->next;
1940 callback_processing = false;
1941 return ERROR_OK;
1944 int target_call_timer_callbacks(void)
1946 return target_call_timer_callbacks_check_time(1);
1949 /* invoke periodic callbacks immediately */
1950 int target_call_timer_callbacks_now(void)
1952 return target_call_timer_callbacks_check_time(0);
1955 int64_t target_timer_next_event(void)
1957 return target_timer_next_event_value;
1960 /* Prints the working area layout for debug purposes */
1961 static void print_wa_layout(struct target *target)
1963 struct working_area *c = target->working_areas;
1965 while (c) {
1966 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1967 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1968 c->address, c->address + c->size - 1, c->size);
1969 c = c->next;
1973 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1974 static void target_split_working_area(struct working_area *area, uint32_t size)
1976 assert(area->free); /* Shouldn't split an allocated area */
1977 assert(size <= area->size); /* Caller should guarantee this */
1979 /* Split only if not already the right size */
1980 if (size < area->size) {
1981 struct working_area *new_wa = malloc(sizeof(*new_wa));
1983 if (!new_wa)
1984 return;
1986 new_wa->next = area->next;
1987 new_wa->size = area->size - size;
1988 new_wa->address = area->address + size;
1989 new_wa->backup = NULL;
1990 new_wa->user = NULL;
1991 new_wa->free = true;
1993 area->next = new_wa;
1994 area->size = size;
1996 /* If backup memory was allocated to this area, it has the wrong size
1997 * now, so free it; it will be reallocated if/when needed */
1998 free(area->backup);
1999 area->backup = NULL;
2003 /* Merge all adjacent free areas into one */
2004 static void target_merge_working_areas(struct target *target)
2006 struct working_area *c = target->working_areas;
2008 while (c && c->next) {
2009 assert(c->next->address == c->address + c->size); /* This is an invariant */
2011 /* Find two adjacent free areas */
2012 if (c->free && c->next->free) {
2013 /* Merge the last into the first */
2014 c->size += c->next->size;
2016 /* Remove the last */
2017 struct working_area *to_be_freed = c->next;
2018 c->next = c->next->next;
2019 free(to_be_freed->backup);
2020 free(to_be_freed);
2022 /* If backup memory was allocated to the remaining area, it has
2023 * the wrong size now */
2024 free(c->backup);
2025 c->backup = NULL;
2026 } else {
2027 c = c->next;
2032 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2034 /* Reevaluate working area address based on MMU state */
2035 if (!target->working_areas) {
2036 int retval;
2037 int enabled;
2039 retval = target->type->mmu(target, &enabled);
2040 if (retval != ERROR_OK)
2041 return retval;
2043 if (!enabled) {
2044 if (target->working_area_phys_spec) {
2045 LOG_DEBUG("MMU disabled, using physical "
2046 "address for working memory " TARGET_ADDR_FMT,
2047 target->working_area_phys);
2048 target->working_area = target->working_area_phys;
2049 } else {
2050 LOG_ERROR("No working memory available. "
2051 "Specify -work-area-phys to target.");
2052 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2054 } else {
2055 if (target->working_area_virt_spec) {
2056 LOG_DEBUG("MMU enabled, using virtual "
2057 "address for working memory " TARGET_ADDR_FMT,
2058 target->working_area_virt);
2059 target->working_area = target->working_area_virt;
2060 } else {
2061 LOG_ERROR("No working memory available. "
2062 "Specify -work-area-virt to target.");
2063 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2067 /* Set up initial working area on first call */
2068 struct working_area *new_wa = malloc(sizeof(*new_wa));
2069 if (new_wa) {
2070 new_wa->next = NULL;
2071 new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
2072 new_wa->address = target->working_area;
2073 new_wa->backup = NULL;
2074 new_wa->user = NULL;
2075 new_wa->free = true;
2078 target->working_areas = new_wa;
2081 /* only allocate multiples of 4 bytes */
2082 size = ALIGN_UP(size, 4);
2084 struct working_area *c = target->working_areas;
2086 /* Find the first large enough working area */
2087 while (c) {
2088 if (c->free && c->size >= size)
2089 break;
2090 c = c->next;
2093 if (!c)
2094 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2096 /* Split the working area into the requested size */
2097 target_split_working_area(c, size);
2099 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2100 size, c->address);
2102 if (target->backup_working_area) {
2103 if (!c->backup) {
2104 c->backup = malloc(c->size);
2105 if (!c->backup)
2106 return ERROR_FAIL;
2109 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2110 if (retval != ERROR_OK)
2111 return retval;
2114 /* mark as used, and return the new (reused) area */
2115 c->free = false;
2116 *area = c;
2118 /* user pointer */
2119 c->user = area;
2121 print_wa_layout(target);
2123 return ERROR_OK;
2126 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2128 int retval;
2130 retval = target_alloc_working_area_try(target, size, area);
2131 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2132 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2133 return retval;
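/* Typical allocation pattern (sketch only; the 256-byte size is illustrative
 * and error handling is abbreviated):
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 256, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   ... run an algorithm inside the area ...
 *   target_free_working_area(target, wa);
 *
 * Freeing also clears the caller's pointer through the user backreference
 * recorded at allocation time. */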
2137 static int target_restore_working_area(struct target *target, struct working_area *area)
2139 int retval = ERROR_OK;
2141 if (target->backup_working_area && area->backup) {
2142 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2143 if (retval != ERROR_OK)
2144 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2145 area->size, area->address);
2148 return retval;
2151 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2152 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2154 if (!area || area->free)
2155 return ERROR_OK;
2157 int retval = ERROR_OK;
2158 if (restore) {
2159 retval = target_restore_working_area(target, area);
2160 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2161 if (retval != ERROR_OK)
2162 return retval;
2165 area->free = true;
2167 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2168 area->size, area->address);
2170 /* mark user pointer invalid */
2171 /* TODO: Is this really safe? It points to some previous caller's memory.
2172 * How could we know that the area pointer is still in that place and not
2173 * some other vital data? What's the purpose of this, anyway? */
2174 *area->user = NULL;
2175 area->user = NULL;
2177 target_merge_working_areas(target);
2179 print_wa_layout(target);
2181 return retval;
2184 int target_free_working_area(struct target *target, struct working_area *area)
2186 return target_free_working_area_restore(target, area, 1);
2189 /* free resources and restore memory; if restoring memory fails,
2190 * free up resources anyway
2192 static void target_free_all_working_areas_restore(struct target *target, int restore)
2194 struct working_area *c = target->working_areas;
2196 LOG_DEBUG("freeing all working areas");
2198 /* Loop through all areas, restoring the allocated ones and marking them as free */
2199 while (c) {
2200 if (!c->free) {
2201 if (restore)
2202 target_restore_working_area(target, c);
2203 c->free = true;
2204 *c->user = NULL; /* Same as above */
2205 c->user = NULL;
2207 c = c->next;
2210 /* Run a merge pass to combine all areas into one */
2211 target_merge_working_areas(target);
2213 print_wa_layout(target);
2216 void target_free_all_working_areas(struct target *target)
2218 target_free_all_working_areas_restore(target, 1);
2220 /* Now we have none or only one working area marked as free */
2221 if (target->working_areas) {
2222 /* Free the last one to allow on-the-fly moving and resizing */
2223 free(target->working_areas->backup);
2224 free(target->working_areas);
2225 target->working_areas = NULL;
2229 /* Find the largest number of bytes that can be allocated */
2230 uint32_t target_get_working_area_avail(struct target *target)
2232 struct working_area *c = target->working_areas;
2233 uint32_t max_size = 0;
2235 if (!c)
2236 return ALIGN_DOWN(target->working_area_size, 4);
2238 while (c) {
2239 if (c->free && max_size < c->size)
2240 max_size = c->size;
2242 c = c->next;
2245 return max_size;
2248 static void target_destroy(struct target *target)
2250 if (target->type->deinit_target)
2251 target->type->deinit_target(target);
2253 if (target->semihosting)
2254 free(target->semihosting->basedir);
2255 free(target->semihosting);
2257 jtag_unregister_event_callback(jtag_enable_callback, target);
2259 struct target_event_action *teap = target->event_action;
2260 while (teap) {
2261 struct target_event_action *next = teap->next;
2262 Jim_DecrRefCount(teap->interp, teap->body);
2263 free(teap);
2264 teap = next;
2267 target_free_all_working_areas(target);
2269 /* release the target's SMP list */
2270 if (target->smp) {
2271 struct target_list *head, *tmp;
2273 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2274 list_del(&head->lh);
2275 head->target->smp = 0;
2276 free(head);
2278 if (target->smp_targets != &empty_smp_targets)
2279 free(target->smp_targets);
2280 target->smp = 0;
2283 rtos_destroy(target);
2285 free(target->gdb_port_override);
2286 free(target->type);
2287 free(target->trace_info);
2288 free(target->fileio_info);
2289 free(target->cmd_name);
2290 free(target);
2293 void target_quit(void)
2295 struct target_event_callback *pe = target_event_callbacks;
2296 while (pe) {
2297 struct target_event_callback *t = pe->next;
2298 free(pe);
2299 pe = t;
2301 target_event_callbacks = NULL;
2303 struct target_timer_callback *pt = target_timer_callbacks;
2304 while (pt) {
2305 struct target_timer_callback *t = pt->next;
2306 free(pt);
2307 pt = t;
2309 target_timer_callbacks = NULL;
2311 for (struct target *target = all_targets; target;) {
2312 struct target *tmp;
2314 tmp = target->next;
2315 target_destroy(target);
2316 target = tmp;
2319 all_targets = NULL;
2322 int target_arch_state(struct target *target)
2324 int retval;
2325 if (!target) {
2326 LOG_WARNING("No target has been configured");
2327 return ERROR_OK;
2330 if (target->state != TARGET_HALTED)
2331 return ERROR_OK;
2333 retval = target->type->arch_state(target);
2334 return retval;
2337 static int target_get_gdb_fileio_info_default(struct target *target,
2338 struct gdb_fileio_info *fileio_info)
2340 /* If the target does not support semihosting functions, it has
2341 no need to provide a .get_gdb_fileio_info callback.
2342 This default just returns ERROR_FAIL, so gdb_server reports "Txx"
2343 (target halted) every time. */
2344 return ERROR_FAIL;
2347 static int target_gdb_fileio_end_default(struct target *target,
2348 int retcode, int fileio_errno, bool ctrl_c)
2350 return ERROR_OK;
2353 int target_profiling_default(struct target *target, uint32_t *samples,
2354 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2356 struct timeval timeout, now;
2358 gettimeofday(&timeout, NULL);
2359 timeval_add_time(&timeout, seconds, 0);
2361 LOG_INFO("Starting profiling. Halting and resuming the"
2362 " target as often as we can...");
2364 uint32_t sample_count = 0;
2365 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2366 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2368 int retval = ERROR_OK;
2369 for (;;) {
2370 target_poll(target);
2371 if (target->state == TARGET_HALTED) {
2372 uint32_t t = buf_get_u32(reg->value, 0, 32);
2373 samples[sample_count++] = t;
2374 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2375 retval = target_resume(target, 1, 0, 0, 0);
2376 target_poll(target);
2377 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2378 } else if (target->state == TARGET_RUNNING) {
2379 /* We want to quickly sample the PC. */
2380 retval = target_halt(target);
2381 } else {
2382 LOG_INFO("Target not halted or running");
2383 retval = ERROR_OK;
2384 break;
2387 if (retval != ERROR_OK)
2388 break;
2390 gettimeofday(&now, NULL);
2391 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2392 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2393 break;
2397 *num_samples = sample_count;
2398 return retval;
2401 /* Single aligned halfwords and words are guaranteed to use 16 or 32 bit
2402 * access modes respectively; otherwise data is handled as quickly as
2403 * possible
2405 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2407 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2408 size, address);
2410 if (!target_was_examined(target)) {
2411 LOG_ERROR("Target not examined yet");
2412 return ERROR_FAIL;
2415 if (size == 0)
2416 return ERROR_OK;
2418 if ((address + size - 1) < address) {
2419 /* GDB can request this when e.g. PC is 0xfffffffc */
2420 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2421 address,
2422 size);
2423 return ERROR_FAIL;
2426 return target->type->write_buffer(target, address, size, buffer);
2429 static int target_write_buffer_default(struct target *target,
2430 target_addr_t address, uint32_t count, const uint8_t *buffer)
2432 uint32_t size;
2433 unsigned int data_bytes = target_data_bits(target) / 8;
2435 /* Align up to maximum bytes. The loop condition makes sure the next pass
2436 * will have something to do with the size we leave to it. */
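/* Worked example (assuming a 32-bit target, so data_bytes == 4): writing
 * 7 bytes starting at address 0x1001 issues a 1-byte access at 0x1001 and
 * a 2-byte access at 0x1002 here, leaving one aligned 4-byte access at
 * 0x1004 for the loop further below. */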
2437 for (size = 1;
2438 size < data_bytes && count >= size * 2 + (address & size);
2439 size *= 2) {
2440 if (address & size) {
2441 int retval = target_write_memory(target, address, size, 1, buffer);
2442 if (retval != ERROR_OK)
2443 return retval;
2444 address += size;
2445 count -= size;
2446 buffer += size;
2450 /* Write the data with as large access size as possible. */
2451 for (; size > 0; size /= 2) {
2452 uint32_t aligned = count - count % size;
2453 if (aligned > 0) {
2454 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2455 if (retval != ERROR_OK)
2456 return retval;
2457 address += aligned;
2458 count -= aligned;
2459 buffer += aligned;
2463 return ERROR_OK;
2466 /* Single aligned halfwords and words are guaranteed to use 16 or 32 bit
2467 * access modes respectively; otherwise data is handled as quickly as
2468 * possible
2470 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2472 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2473 size, address);
2475 if (!target_was_examined(target)) {
2476 LOG_ERROR("Target not examined yet");
2477 return ERROR_FAIL;
2480 if (size == 0)
2481 return ERROR_OK;
2483 if ((address + size - 1) < address) {
2484 /* GDB can request this when e.g. PC is 0xfffffffc */
2485 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2486 address,
2487 size);
2488 return ERROR_FAIL;
2491 return target->type->read_buffer(target, address, size, buffer);
2494 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2496 uint32_t size;
2497 unsigned int data_bytes = target_data_bits(target) / 8;
2499 /* Align up to maximum bytes. The loop condition makes sure the next pass
2500 * will have something to do with the size we leave to it. */
2501 for (size = 1;
2502 size < data_bytes && count >= size * 2 + (address & size);
2503 size *= 2) {
2504 if (address & size) {
2505 int retval = target_read_memory(target, address, size, 1, buffer);
2506 if (retval != ERROR_OK)
2507 return retval;
2508 address += size;
2509 count -= size;
2510 buffer += size;
2514 /* Read the data with as large access size as possible. */
2515 for (; size > 0; size /= 2) {
2516 uint32_t aligned = count - count % size;
2517 if (aligned > 0) {
2518 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2519 if (retval != ERROR_OK)
2520 return retval;
2521 address += aligned;
2522 count -= aligned;
2523 buffer += aligned;
2527 return ERROR_OK;
2530 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2532 uint8_t *buffer;
2533 int retval;
2534 uint32_t i;
2535 uint32_t checksum = 0;
2536 if (!target_was_examined(target)) {
2537 LOG_ERROR("Target not examined yet");
2538 return ERROR_FAIL;
2540 if (!target->type->checksum_memory) {
2541 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2542 return ERROR_FAIL;
2545 retval = target->type->checksum_memory(target, address, size, &checksum);
2546 if (retval != ERROR_OK) {
2547 buffer = malloc(size);
2548 if (!buffer) {
2549 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2550 return ERROR_COMMAND_SYNTAX_ERROR;
2552 retval = target_read_buffer(target, address, size, buffer);
2553 if (retval != ERROR_OK) {
2554 free(buffer);
2555 return retval;
2558 /* convert to target endianness */
2559 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2560 uint32_t target_data;
2561 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2562 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2565 retval = image_calculate_checksum(buffer, size, &checksum);
2566 free(buffer);
2569 *crc = checksum;
2571 return retval;
2574 int target_blank_check_memory(struct target *target,
2575 struct target_memory_check_block *blocks, int num_blocks,
2576 uint8_t erased_value)
2578 if (!target_was_examined(target)) {
2579 LOG_ERROR("Target not examined yet");
2580 return ERROR_FAIL;
2583 if (!target->type->blank_check_memory)
2584 return ERROR_NOT_IMPLEMENTED;
2586 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2589 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2591 uint8_t value_buf[8];
2592 if (!target_was_examined(target)) {
2593 LOG_ERROR("Target not examined yet");
2594 return ERROR_FAIL;
2597 int retval = target_read_memory(target, address, 8, 1, value_buf);
2599 if (retval == ERROR_OK) {
2600 *value = target_buffer_get_u64(target, value_buf);
2601 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2602 address,
2603 *value);
2604 } else {
2605 *value = 0x0;
2606 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2607 address);
2610 return retval;
2613 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2615 uint8_t value_buf[4];
2616 if (!target_was_examined(target)) {
2617 LOG_ERROR("Target not examined yet");
2618 return ERROR_FAIL;
2621 int retval = target_read_memory(target, address, 4, 1, value_buf);
2623 if (retval == ERROR_OK) {
2624 *value = target_buffer_get_u32(target, value_buf);
2625 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2626 address,
2627 *value);
2628 } else {
2629 *value = 0x0;
2630 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2631 address);
2634 return retval;
2637 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2639 uint8_t value_buf[2];
2640 if (!target_was_examined(target)) {
2641 LOG_ERROR("Target not examined yet");
2642 return ERROR_FAIL;
2645 int retval = target_read_memory(target, address, 2, 1, value_buf);
2647 if (retval == ERROR_OK) {
2648 *value = target_buffer_get_u16(target, value_buf);
2649 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2650 address,
2651 *value);
2652 } else {
2653 *value = 0x0;
2654 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2655 address);
2658 return retval;
2661 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2663 if (!target_was_examined(target)) {
2664 LOG_ERROR("Target not examined yet");
2665 return ERROR_FAIL;
2668 int retval = target_read_memory(target, address, 1, 1, value);
2670 if (retval == ERROR_OK) {
2671 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2672 address,
2673 *value);
2674 } else {
2675 *value = 0x0;
2676 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2677 address);
2680 return retval;
2683 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2685 int retval;
2686 uint8_t value_buf[8];
2687 if (!target_was_examined(target)) {
2688 LOG_ERROR("Target not examined yet");
2689 return ERROR_FAIL;
2692 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2693 address,
2694 value);
2696 target_buffer_set_u64(target, value_buf, value);
2697 retval = target_write_memory(target, address, 8, 1, value_buf);
2698 if (retval != ERROR_OK)
2699 LOG_DEBUG("failed: %i", retval);
2701 return retval;
2704 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2706 int retval;
2707 uint8_t value_buf[4];
2708 if (!target_was_examined(target)) {
2709 LOG_ERROR("Target not examined yet");
2710 return ERROR_FAIL;
2713 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2714 address,
2715 value);
2717 target_buffer_set_u32(target, value_buf, value);
2718 retval = target_write_memory(target, address, 4, 1, value_buf);
2719 if (retval != ERROR_OK)
2720 LOG_DEBUG("failed: %i", retval);
2722 return retval;
2725 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2727 int retval;
2728 uint8_t value_buf[2];
2729 if (!target_was_examined(target)) {
2730 LOG_ERROR("Target not examined yet");
2731 return ERROR_FAIL;
2734 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2735 address,
2736 value);
2738 target_buffer_set_u16(target, value_buf, value);
2739 retval = target_write_memory(target, address, 2, 1, value_buf);
2740 if (retval != ERROR_OK)
2741 LOG_DEBUG("failed: %i", retval);
2743 return retval;
2746 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2748 int retval;
2749 if (!target_was_examined(target)) {
2750 LOG_ERROR("Target not examined yet");
2751 return ERROR_FAIL;
2754 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2755 address, value);
2757 retval = target_write_memory(target, address, 1, 1, &value);
2758 if (retval != ERROR_OK)
2759 LOG_DEBUG("failed: %i", retval);
2761 return retval;
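/* Sketch of the convenience accessors above (the address and register name
 * are illustrative assumptions, not taken from this file):
 *
 *   uint32_t cpuid;
 *   if (target_read_u32(target, 0xe000ed00, &cpuid) == ERROR_OK)
 *       LOG_DEBUG("CPUID: 0x%08" PRIx32, cpuid);
 *   target_write_u32(target, 0x20000000, 0xdeadbeef);
 *
 * All of these helpers convert between host and target endianness via the
 * target_buffer_get_*()/target_buffer_set_*() calls used above. */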
2764 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2766 int retval;
2767 uint8_t value_buf[8];
2768 if (!target_was_examined(target)) {
2769 LOG_ERROR("Target not examined yet");
2770 return ERROR_FAIL;
2773 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2774 address,
2775 value);
2777 target_buffer_set_u64(target, value_buf, value);
2778 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2779 if (retval != ERROR_OK)
2780 LOG_DEBUG("failed: %i", retval);
2782 return retval;
2785 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2787 int retval;
2788 uint8_t value_buf[4];
2789 if (!target_was_examined(target)) {
2790 LOG_ERROR("Target not examined yet");
2791 return ERROR_FAIL;
2794 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2795 address,
2796 value);
2798 target_buffer_set_u32(target, value_buf, value);
2799 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2800 if (retval != ERROR_OK)
2801 LOG_DEBUG("failed: %i", retval);
2803 return retval;
2806 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2808 int retval;
2809 uint8_t value_buf[2];
2810 if (!target_was_examined(target)) {
2811 LOG_ERROR("Target not examined yet");
2812 return ERROR_FAIL;
2815 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2816 address,
2817 value);
2819 target_buffer_set_u16(target, value_buf, value);
2820 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2821 if (retval != ERROR_OK)
2822 LOG_DEBUG("failed: %i", retval);
2824 return retval;
2827 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2829 int retval;
2830 if (!target_was_examined(target)) {
2831 LOG_ERROR("Target not examined yet");
2832 return ERROR_FAIL;
2835 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2836 address, value);
2838 retval = target_write_phys_memory(target, address, 1, 1, &value);
2839 if (retval != ERROR_OK)
2840 LOG_DEBUG("failed: %i", retval);
2842 return retval;
2845 static int find_target(struct command_invocation *cmd, const char *name)
2847 struct target *target = get_target(name);
2848 if (!target) {
2849 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2850 return ERROR_FAIL;
2852 if (!target->tap->enabled) {
2853 command_print(cmd, "Target: TAP %s is disabled, "
2854 "can't be the current target\n",
2855 target->tap->dotted_name);
2856 return ERROR_FAIL;
2859 cmd->ctx->current_target = target;
2860 if (cmd->ctx->current_target_override)
2861 cmd->ctx->current_target_override = target;
2863 return ERROR_OK;
2867 COMMAND_HANDLER(handle_targets_command)
2869 int retval = ERROR_OK;
2870 if (CMD_ARGC == 1) {
2871 retval = find_target(CMD, CMD_ARGV[0]);
2872 if (retval == ERROR_OK) {
2873 /* we're done! */
2874 return retval;
2878 struct target *target = all_targets;
2879 command_print(CMD, " TargetName Type Endian TapName State ");
2880 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2881 while (target) {
2882 const char *state;
2883 char marker = ' ';
2885 if (target->tap->enabled)
2886 state = target_state_name(target);
2887 else
2888 state = "tap-disabled";
2890 if (CMD_CTX->current_target == target)
2891 marker = '*';
2893 /* keep columns lined up to match the headers above */
2894 command_print(CMD,
2895 "%2d%c %-18s %-10s %-6s %-18s %s",
2896 target->target_number,
2897 marker,
2898 target_name(target),
2899 target_type_name(target),
2900 jim_nvp_value2name_simple(nvp_target_endian,
2901 target->endianness)->name,
2902 target->tap->dotted_name,
2903 state);
2904 target = target->next;
2907 return retval;
2910 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2912 static int power_dropout;
2913 static int srst_asserted;
2915 static int run_power_restore;
2916 static int run_power_dropout;
2917 static int run_srst_asserted;
2918 static int run_srst_deasserted;
2920 static int sense_handler(void)
2922 static int prev_srst_asserted;
2923 static int prev_power_dropout;
2925 int retval = jtag_power_dropout(&power_dropout);
2926 if (retval != ERROR_OK)
2927 return retval;
2929 int power_restored;
2930 power_restored = prev_power_dropout && !power_dropout;
2931 if (power_restored)
2932 run_power_restore = 1;
2934 int64_t current = timeval_ms();
2935 static int64_t last_power;
2936 bool wait_more = last_power + 2000 > current;
2937 if (power_dropout && !wait_more) {
2938 run_power_dropout = 1;
2939 last_power = current;
2942 retval = jtag_srst_asserted(&srst_asserted);
2943 if (retval != ERROR_OK)
2944 return retval;
2946 int srst_deasserted;
2947 srst_deasserted = prev_srst_asserted && !srst_asserted;
2949 static int64_t last_srst;
2950 wait_more = last_srst + 2000 > current;
2951 if (srst_deasserted && !wait_more) {
2952 run_srst_deasserted = 1;
2953 last_srst = current;
2956 if (!prev_srst_asserted && srst_asserted)
2957 run_srst_asserted = 1;
2959 prev_srst_asserted = srst_asserted;
2960 prev_power_dropout = power_dropout;
2962 if (srst_deasserted || power_restored) {
2963 /* Other than logging the event we can't do anything here.
2964 * Issuing a reset is a particularly bad idea as we might
2965 * be inside a reset already.
2969 return ERROR_OK;
2972 /* process target state changes */
2973 static int handle_target(void *priv)
2975 Jim_Interp *interp = (Jim_Interp *)priv;
2976 int retval = ERROR_OK;
2978 if (!is_jtag_poll_safe()) {
2979 /* polling is disabled currently */
2980 return ERROR_OK;
2983 /* we do not want to recurse here... */
2984 static int recursive;
2985 if (!recursive) {
2986 recursive = 1;
2987 sense_handler();
2988 /* danger! running these procedures can trigger srst assertions and power dropouts.
2989 * We need to avoid an infinite loop/recursion here and we do that by
2990 * clearing the flags after running these events.
2992 int did_something = 0;
2993 if (run_srst_asserted) {
2994 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2995 Jim_Eval(interp, "srst_asserted");
2996 did_something = 1;
2998 if (run_srst_deasserted) {
2999 Jim_Eval(interp, "srst_deasserted");
3000 did_something = 1;
3002 if (run_power_dropout) {
3003 LOG_INFO("Power dropout detected, running power_dropout proc.");
3004 Jim_Eval(interp, "power_dropout");
3005 did_something = 1;
3007 if (run_power_restore) {
3008 Jim_Eval(interp, "power_restore");
3009 did_something = 1;
3012 if (did_something) {
3013 /* clear detect flags */
3014 sense_handler();
3017 /* clear action flags */
3019 run_srst_asserted = 0;
3020 run_srst_deasserted = 0;
3021 run_power_restore = 0;
3022 run_power_dropout = 0;
3024 recursive = 0;
3027 /* Poll targets for state changes unless that's globally disabled.
3028 * Skip targets that are currently disabled.
3030 for (struct target *target = all_targets;
3031 is_jtag_poll_safe() && target;
3032 target = target->next) {
3034 if (!target_was_examined(target))
3035 continue;
3037 if (!target->tap->enabled)
3038 continue;
3040 if (target->backoff.times > target->backoff.count) {
3041 /* do not poll this time as we failed previously */
3042 target->backoff.count++;
3043 continue;
3045 target->backoff.count = 0;
3047 /* only poll target if we've got power and srst isn't asserted */
3048 if (!power_dropout && !srst_asserted) {
3049 /* polling may fail silently until the target has been examined */
3050 retval = target_poll(target);
3051 if (retval != ERROR_OK) {
3052 /* 100ms polling interval. Increase interval between polling up to 5000ms */
3053 if (target->backoff.times * polling_interval < 5000) {
3054 target->backoff.times *= 2;
3055 target->backoff.times++;
3058 /* Tell GDB to halt the debugger. This allows the user to
3059 * run monitor commands to handle the situation.
3061 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3063 if (target->backoff.times > 0) {
3064 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3065 target_reset_examined(target);
3066 retval = target_examine_one(target);
3067 /* Target examination could have failed due to unstable connection,
3068 * but we set the examined flag anyway to repoll it later */
3069 if (retval != ERROR_OK) {
3070 target_set_examined(target);
3071 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3072 target->backoff.times * polling_interval);
3073 return retval;
3077 /* Since we succeeded, we reset backoff count */
3078 target->backoff.times = 0;
3082 return retval;
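/* Example invocations accepted by the handler below (assuming it is bound to
 * the usual "reg" command name; register names and values are illustrative):
 *   reg                - list all registers of the current target
 *   reg pc             - print one register by name
 *   reg 0 force        - re-read register 0 from the target, bypassing the cache
 *   reg pc 0x20000000  - write a new value into the register
 */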
3085 COMMAND_HANDLER(handle_reg_command)
3087 LOG_DEBUG("-");
3089 struct target *target = get_current_target(CMD_CTX);
3090 struct reg *reg = NULL;
3092 /* list all available registers for the current target */
3093 if (CMD_ARGC == 0) {
3094 struct reg_cache *cache = target->reg_cache;
3096 unsigned int count = 0;
3097 while (cache) {
3098 unsigned i;
3100 command_print(CMD, "===== %s", cache->name);
3102 for (i = 0, reg = cache->reg_list;
3103 i < cache->num_regs;
3104 i++, reg++, count++) {
3105 if (reg->exist == false || reg->hidden)
3106 continue;
3107 /* only print cached values if they are valid */
3108 if (reg->valid) {
3109 char *value = buf_to_hex_str(reg->value,
3110 reg->size);
3111 command_print(CMD,
3112 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3113 count, reg->name,
3114 reg->size, value,
3115 reg->dirty
3116 ? " (dirty)"
3117 : "");
3118 free(value);
3119 } else {
3120 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3121 count, reg->name,
3122 reg->size);
3125 cache = cache->next;
3128 return ERROR_OK;
3131 /* access a single register by its ordinal number */
3132 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3133 unsigned num;
3134 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3136 struct reg_cache *cache = target->reg_cache;
3137 unsigned int count = 0;
3138 while (cache) {
3139 unsigned i;
3140 for (i = 0; i < cache->num_regs; i++) {
3141 if (count++ == num) {
3142 reg = &cache->reg_list[i];
3143 break;
3146 if (reg)
3147 break;
3148 cache = cache->next;
3151 if (!reg) {
3152 command_print(CMD, "%i is out of bounds, the current target "
3153 "has only %i registers (0 - %i)", num, count, count - 1);
3154 return ERROR_OK;
3156 } else {
3157 /* access a single register by its name */
3158 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3160 if (!reg)
3161 goto not_found;
3164 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3166 if (!reg->exist)
3167 goto not_found;
3169 /* display a register */
3170 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3171 && (CMD_ARGV[1][0] <= '9')))) {
3172 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3173 reg->valid = 0;
3175 if (reg->valid == 0) {
3176 int retval = reg->type->get(reg);
3177 if (retval != ERROR_OK) {
3178 LOG_ERROR("Could not read register '%s'", reg->name);
3179 return retval;
3182 char *value = buf_to_hex_str(reg->value, reg->size);
3183 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3184 free(value);
3185 return ERROR_OK;
3188 /* set register value */
3189 if (CMD_ARGC == 2) {
3190 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3191 if (!buf)
3192 return ERROR_FAIL;
3193 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3195 int retval = reg->type->set(reg, buf);
3196 if (retval != ERROR_OK) {
3197 LOG_ERROR("Could not write to register '%s'", reg->name);
3198 } else {
3199 char *value = buf_to_hex_str(reg->value, reg->size);
3200 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3201 free(value);
3204 free(buf);
3206 return retval;
3209 return ERROR_COMMAND_SYNTAX_ERROR;
3211 not_found:
3212 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3213 return ERROR_OK;
3216 COMMAND_HANDLER(handle_poll_command)
3218 int retval = ERROR_OK;
3219 struct target *target = get_current_target(CMD_CTX);
3221 if (CMD_ARGC == 0) {
3222 command_print(CMD, "background polling: %s",
3223 jtag_poll_get_enabled() ? "on" : "off");
3224 command_print(CMD, "TAP: %s (%s)",
3225 target->tap->dotted_name,
3226 target->tap->enabled ? "enabled" : "disabled");
3227 if (!target->tap->enabled)
3228 return ERROR_OK;
3229 retval = target_poll(target);
3230 if (retval != ERROR_OK)
3231 return retval;
3232 retval = target_arch_state(target);
3233 if (retval != ERROR_OK)
3234 return retval;
3235 } else if (CMD_ARGC == 1) {
3236 bool enable;
3237 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3238 jtag_poll_set_enabled(enable);
3239 } else
3240 return ERROR_COMMAND_SYNTAX_ERROR;
3242 return retval;
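/* Example: "wait_halt 1000" (assuming the usual command name) waits up to one
 * second for the current target to halt; with no argument the
 * DEFAULT_HALT_TIMEOUT of 5000 ms is used. */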
3245 COMMAND_HANDLER(handle_wait_halt_command)
3247 if (CMD_ARGC > 1)
3248 return ERROR_COMMAND_SYNTAX_ERROR;
3250 unsigned ms = DEFAULT_HALT_TIMEOUT;
3251 if (1 == CMD_ARGC) {
3252 int retval = parse_uint(CMD_ARGV[0], &ms);
3253 if (retval != ERROR_OK)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3257 struct target *target = get_current_target(CMD_CTX);
3258 return target_wait_state(target, TARGET_HALTED, ms);
3261 /* wait for target state to change. The trick here is to have a low
3262 * latency for short waits and not to suck up all the CPU time
3263 * on longer waits.
3265 * After 500ms, keep_alive() is invoked
3267 int target_wait_state(struct target *target, enum target_state state, int ms)
3269 int retval;
3270 int64_t then = 0, cur;
3271 bool once = true;
3273 for (;;) {
3274 retval = target_poll(target);
3275 if (retval != ERROR_OK)
3276 return retval;
3277 if (target->state == state)
3278 break;
3279 cur = timeval_ms();
3280 if (once) {
3281 once = false;
3282 then = timeval_ms();
3283 LOG_DEBUG("waiting for target %s...",
3284 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3287 if (cur-then > 500)
3288 keep_alive();
3290 if ((cur-then) > ms) {
3291 LOG_ERROR("timed out while waiting for target %s",
3292 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3293 return ERROR_FAIL;
3297 return ERROR_OK;
3300 COMMAND_HANDLER(handle_halt_command)
3302 LOG_DEBUG("-");
3304 struct target *target = get_current_target(CMD_CTX);
3306 target->verbose_halt_msg = true;
3308 int retval = target_halt(target);
3309 if (retval != ERROR_OK)
3310 return retval;
3312 if (CMD_ARGC == 1) {
3313 unsigned wait_local;
3314 retval = parse_uint(CMD_ARGV[0], &wait_local);
3315 if (retval != ERROR_OK)
3316 return ERROR_COMMAND_SYNTAX_ERROR;
3317 if (!wait_local)
3318 return ERROR_OK;
3321 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3324 COMMAND_HANDLER(handle_soft_reset_halt_command)
3326 struct target *target = get_current_target(CMD_CTX);
3328 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3330 target_soft_reset_halt(target);
3332 return ERROR_OK;
3335 COMMAND_HANDLER(handle_reset_command)
3337 if (CMD_ARGC > 1)
3338 return ERROR_COMMAND_SYNTAX_ERROR;
3340 enum target_reset_mode reset_mode = RESET_RUN;
3341 if (CMD_ARGC == 1) {
3342 const struct nvp *n;
3343 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3344 if ((!n->name) || (n->value == RESET_UNKNOWN))
3345 return ERROR_COMMAND_SYNTAX_ERROR;
3346 reset_mode = n->value;
3349 /* reset *all* targets */
3350 return target_process_reset(CMD, reset_mode);
3354 COMMAND_HANDLER(handle_resume_command)
3356 int current = 1;
3357 if (CMD_ARGC > 1)
3358 return ERROR_COMMAND_SYNTAX_ERROR;
3360 struct target *target = get_current_target(CMD_CTX);
3362 /* with no CMD_ARGV, resume from current pc, addr = 0,
3363 * with one argument, addr = CMD_ARGV[0],
3364 * handle breakpoints, not debugging */
3365 target_addr_t addr = 0;
3366 if (CMD_ARGC == 1) {
3367 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3368 current = 0;
3371 return target_resume(target, current, addr, 1, 0);
3374 COMMAND_HANDLER(handle_step_command)
3376 if (CMD_ARGC > 1)
3377 return ERROR_COMMAND_SYNTAX_ERROR;
3379 LOG_DEBUG("-");
3381 /* with no CMD_ARGV, step from current pc, addr = 0,
3382 * with one argument, addr = CMD_ARGV[0],
3383 * handle breakpoints, debugging */
3384 target_addr_t addr = 0;
3385 int current_pc = 1;
3386 if (CMD_ARGC == 1) {
3387 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3388 current_pc = 0;
3391 struct target *target = get_current_target(CMD_CTX);
3393 return target_step(target, current_pc, addr, 1);
3396 void target_handle_md_output(struct command_invocation *cmd,
3397 struct target *target, target_addr_t address, unsigned size,
3398 unsigned count, const uint8_t *buffer)
3400 const unsigned line_bytecnt = 32;
3401 unsigned line_modulo = line_bytecnt / size;
3403 char output[line_bytecnt * 4 + 1];
3404 unsigned output_len = 0;
3406 const char *value_fmt;
3407 switch (size) {
3408 case 8:
3409 value_fmt = "%16.16"PRIx64" ";
3410 break;
3411 case 4:
3412 value_fmt = "%8.8"PRIx64" ";
3413 break;
3414 case 2:
3415 value_fmt = "%4.4"PRIx64" ";
3416 break;
3417 case 1:
3418 value_fmt = "%2.2"PRIx64" ";
3419 break;
3420 default:
3421 /* "can't happen", caller checked */
3422 LOG_ERROR("invalid memory read size: %u", size);
3423 return;
3426 for (unsigned i = 0; i < count; i++) {
3427 if (i % line_modulo == 0) {
3428 output_len += snprintf(output + output_len,
3429 sizeof(output) - output_len,
3430 TARGET_ADDR_FMT ": ",
3431 (address + (i * size)));
3434 uint64_t value = 0;
3435 const uint8_t *value_ptr = buffer + i * size;
3436 switch (size) {
3437 case 8:
3438 value = target_buffer_get_u64(target, value_ptr);
3439 break;
3440 case 4:
3441 value = target_buffer_get_u32(target, value_ptr);
3442 break;
3443 case 2:
3444 value = target_buffer_get_u16(target, value_ptr);
3445 break;
3446 case 1:
3447 value = *value_ptr;
3449 output_len += snprintf(output + output_len,
3450 sizeof(output) - output_len,
3451 value_fmt, value);
3453 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3454 command_print(cmd, "%s", output);
3455 output_len = 0;
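/* Example invocations for the handler below (assuming the usual mdd/mdw/mdh/mdb
 * command names; the access size comes from the third letter of the name):
 *   mdw 0x20000000 16       - dump sixteen 32-bit words
 *   mdb phys 0x0 64         - dump 64 bytes through the physical-memory path
 */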
3460 COMMAND_HANDLER(handle_md_command)
3462 if (CMD_ARGC < 1)
3463 return ERROR_COMMAND_SYNTAX_ERROR;
3465 unsigned size = 0;
3466 switch (CMD_NAME[2]) {
3467 case 'd':
3468 size = 8;
3469 break;
3470 case 'w':
3471 size = 4;
3472 break;
3473 case 'h':
3474 size = 2;
3475 break;
3476 case 'b':
3477 size = 1;
3478 break;
3479 default:
3480 return ERROR_COMMAND_SYNTAX_ERROR;
3483 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3484 int (*fn)(struct target *target,
3485 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3486 if (physical) {
3487 CMD_ARGC--;
3488 CMD_ARGV++;
3489 fn = target_read_phys_memory;
3490 } else
3491 fn = target_read_memory;
3492 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3493 return ERROR_COMMAND_SYNTAX_ERROR;
3495 target_addr_t address;
3496 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3498 unsigned count = 1;
3499 if (CMD_ARGC == 2)
3500 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3502 uint8_t *buffer = calloc(count, size);
3503 if (!buffer) {
3504 LOG_ERROR("Failed to allocate md read buffer");
3505 return ERROR_FAIL;
3508 struct target *target = get_current_target(CMD_CTX);
3509 int retval = fn(target, address, size, count, buffer);
3510 if (retval == ERROR_OK)
3511 target_handle_md_output(CMD, target, address, size, count, buffer);
3513 free(buffer);
3515 return retval;
3518 typedef int (*target_write_fn)(struct target *target,
3519 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3521 static int target_fill_mem(struct target *target,
3522 target_addr_t address,
3523 target_write_fn fn,
3524 unsigned data_size,
3525 /* value */
3526 uint64_t b,
3527 /* count */
3528 unsigned c)
3530 /* We have to write in reasonably large chunks to be able
3531 * to fill large memory areas with any sane speed */
3532 const unsigned chunk_size = 16384;
3533 uint8_t *target_buf = malloc(chunk_size * data_size);
3534 if (!target_buf) {
3535 LOG_ERROR("Out of memory");
3536 return ERROR_FAIL;
3539 for (unsigned i = 0; i < chunk_size; i++) {
3540 switch (data_size) {
3541 case 8:
3542 target_buffer_set_u64(target, target_buf + i * data_size, b);
3543 break;
3544 case 4:
3545 target_buffer_set_u32(target, target_buf + i * data_size, b);
3546 break;
3547 case 2:
3548 target_buffer_set_u16(target, target_buf + i * data_size, b);
3549 break;
3550 case 1:
3551 target_buffer_set_u8(target, target_buf + i * data_size, b);
3552 break;
3553 default:
3554 exit(-1);
3558 int retval = ERROR_OK;
3560 for (unsigned x = 0; x < c; x += chunk_size) {
3561 unsigned current;
3562 current = c - x;
3563 if (current > chunk_size)
3564 current = chunk_size;
3565 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3566 if (retval != ERROR_OK)
3567 break;
3568 /* avoid GDB timeouts */
3569 keep_alive();
3571 free(target_buf);
3573 return retval;
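/* Example invocations for the handler below (assuming the usual mwd/mww/mwh/mwb
 * command names; addresses and values are illustrative):
 *   mww 0x20000000 0xdeadbeef      - write a single 32-bit word
 *   mwb phys 0x100 0xff 16         - fill 16 bytes with 0xff via physical memory
 */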
3577 COMMAND_HANDLER(handle_mw_command)
3579 if (CMD_ARGC < 2)
3580 return ERROR_COMMAND_SYNTAX_ERROR;
3581 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3582 target_write_fn fn;
3583 if (physical) {
3584 CMD_ARGC--;
3585 CMD_ARGV++;
3586 fn = target_write_phys_memory;
3587 } else
3588 fn = target_write_memory;
3589 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3590 return ERROR_COMMAND_SYNTAX_ERROR;
3592 target_addr_t address;
3593 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3595 uint64_t value;
3596 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3598 unsigned count = 1;
3599 if (CMD_ARGC == 3)
3600 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3602 struct target *target = get_current_target(CMD_CTX);
3603 unsigned wordsize;
3604 switch (CMD_NAME[2]) {
3605 case 'd':
3606 wordsize = 8;
3607 break;
3608 case 'w':
3609 wordsize = 4;
3610 break;
3611 case 'h':
3612 wordsize = 2;
3613 break;
3614 case 'b':
3615 wordsize = 1;
3616 break;
3617 default:
3618 return ERROR_COMMAND_SYNTAX_ERROR;
3621 return target_fill_mem(target, address, fn, wordsize, value, count);
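/* Argument layout parsed below (only the filename is mandatory):
 *   load_image <file> [base_address [type [min_address [max_length]]]]
 * where max_length is added to min_address to form the upper bound. */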
3624 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3625 target_addr_t *min_address, target_addr_t *max_address)
3627 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3628 return ERROR_COMMAND_SYNTAX_ERROR;
3630 /* a base address isn't always necessary,
3631 * default to 0x0 (i.e. don't relocate) */
3632 if (CMD_ARGC >= 2) {
3633 target_addr_t addr;
3634 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3635 image->base_address = addr;
3636 image->base_address_set = true;
3637 } else
3638 image->base_address_set = false;
3640 image->start_address_set = false;
3642 if (CMD_ARGC >= 4)
3643 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3644 if (CMD_ARGC == 5) {
3645 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3646 /* use size (given) to find max (required) */
3647 *max_address += *min_address;
3650 if (*min_address > *max_address)
3651 return ERROR_COMMAND_SYNTAX_ERROR;
3653 return ERROR_OK;
3656 COMMAND_HANDLER(handle_load_image_command)
3658 uint8_t *buffer;
3659 size_t buf_cnt;
3660 uint32_t image_size;
3661 target_addr_t min_address = 0;
3662 target_addr_t max_address = -1;
3663 struct image image;
3665 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3666 &image, &min_address, &max_address);
3667 if (retval != ERROR_OK)
3668 return retval;
3670 struct target *target = get_current_target(CMD_CTX);
3672 struct duration bench;
3673 duration_start(&bench);
3675 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3676 return ERROR_FAIL;
3678 image_size = 0x0;
3679 retval = ERROR_OK;
3680 for (unsigned int i = 0; i < image.num_sections; i++) {
3681 buffer = malloc(image.sections[i].size);
3682 if (!buffer) {
3683 command_print(CMD,
3684 "error allocating buffer for section (%d bytes)",
3685 (int)(image.sections[i].size));
3686 retval = ERROR_FAIL;
3687 break;
3690 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3691 if (retval != ERROR_OK) {
3692 free(buffer);
3693 break;
3696 uint32_t offset = 0;
3697 uint32_t length = buf_cnt;
3699 /* DANGER!!! beware of unsigned comparison here!!! */
3701 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3702 (image.sections[i].base_address < max_address)) {
3704 if (image.sections[i].base_address < min_address) {
3705 /* clip addresses below */
3706 offset += min_address-image.sections[i].base_address;
3707 length -= offset;
3710 if (image.sections[i].base_address + buf_cnt > max_address)
3711 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3713 retval = target_write_buffer(target,
3714 image.sections[i].base_address + offset, length, buffer + offset);
3715 if (retval != ERROR_OK) {
3716 free(buffer);
3717 break;
3719 image_size += length;
3720 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3721 (unsigned int)length,
3722 image.sections[i].base_address + offset);
3725 free(buffer);
3728 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3729 command_print(CMD, "downloaded %" PRIu32 " bytes "
3730 "in %fs (%0.3f KiB/s)", image_size,
3731 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3734 image_close(&image);
3736 return retval;
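/* Example: "dump_image ram.bin 0x20000000 0x1000" (assuming the usual command
 * name) reads 4 KiB starting at 0x20000000 and writes it to ram.bin; filename,
 * address and size are the three required arguments parsed below. */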
3740 COMMAND_HANDLER(handle_dump_image_command)
3742 struct fileio *fileio;
3743 uint8_t *buffer;
3744 int retval, retvaltemp;
3745 target_addr_t address, size;
3746 struct duration bench;
3747 struct target *target = get_current_target(CMD_CTX);
3749 if (CMD_ARGC != 3)
3750 return ERROR_COMMAND_SYNTAX_ERROR;
3752 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3753 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3755 uint32_t buf_size = (size > 4096) ? 4096 : size;
3756 buffer = malloc(buf_size);
3757 if (!buffer)
3758 return ERROR_FAIL;
3760 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3761 if (retval != ERROR_OK) {
3762 free(buffer);
3763 return retval;
3766 duration_start(&bench);
3768 while (size > 0) {
3769 size_t size_written;
3770 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3771 retval = target_read_buffer(target, address, this_run_size, buffer);
3772 if (retval != ERROR_OK)
3773 break;
3775 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3776 if (retval != ERROR_OK)
3777 break;
3779 size -= this_run_size;
3780 address += this_run_size;
3783 free(buffer);
3785 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3786 size_t filesize;
3787 retval = fileio_size(fileio, &filesize);
3788 if (retval != ERROR_OK)
3789 return retval;
3790 command_print(CMD,
3791 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3792 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3795 retvaltemp = fileio_close(fileio);
3796 if (retvaltemp != ERROR_OK)
3797 return retvaltemp;
3799 return retval;
3802 enum verify_mode {
3803 IMAGE_TEST = 0,
3804 IMAGE_VERIFY = 1,
3805 IMAGE_CHECKSUM_ONLY = 2
3808 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3810 uint8_t *buffer;
3811 size_t buf_cnt;
3812 uint32_t image_size;
3813 int retval;
3814 uint32_t checksum = 0;
3815 uint32_t mem_checksum = 0;
3817 struct image image;
3819 struct target *target = get_current_target(CMD_CTX);
3821 if (CMD_ARGC < 1)
3822 return ERROR_COMMAND_SYNTAX_ERROR;
3824 if (!target) {
3825 LOG_ERROR("no target selected");
3826 return ERROR_FAIL;
3829 struct duration bench;
3830 duration_start(&bench);
3832 if (CMD_ARGC >= 2) {
3833 target_addr_t addr;
3834 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3835 image.base_address = addr;
3836 image.base_address_set = true;
3837 } else {
3838 image.base_address_set = false;
3839 image.base_address = 0x0;
3842 image.start_address_set = false;
3844 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3845 if (retval != ERROR_OK)
3846 return retval;
3848 image_size = 0x0;
3849 int diffs = 0;
3850 retval = ERROR_OK;
3851 for (unsigned int i = 0; i < image.num_sections; i++) {
3852 buffer = malloc(image.sections[i].size);
3853 if (!buffer) {
3854 command_print(CMD,
3855 "error allocating buffer for section (%" PRIu32 " bytes)",
3856 image.sections[i].size);
3857 break;
3859 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3860 if (retval != ERROR_OK) {
3861 free(buffer);
3862 break;
3865 if (verify >= IMAGE_VERIFY) {
3866 /* calculate checksum of image */
3867 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3868 if (retval != ERROR_OK) {
3869 free(buffer);
3870 break;
3873 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3878 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3879 LOG_ERROR("checksum mismatch");
3880 free(buffer);
3881 retval = ERROR_FAIL;
3882 goto done;
3884 if (checksum != mem_checksum) {
3885 /* failed crc checksum, fall back to a binary compare */
3886 uint8_t *data;
3888 if (diffs == 0)
3889 LOG_ERROR("checksum mismatch - attempting binary compare");
3891 data = malloc(buf_cnt);
3893 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3894 if (retval == ERROR_OK) {
3895 uint32_t t;
3896 for (t = 0; t < buf_cnt; t++) {
3897 if (data[t] != buffer[t]) {
3898 command_print(CMD,
3899 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3900 diffs,
3901 (unsigned)(t + image.sections[i].base_address),
3902 data[t],
3903 buffer[t]);
3904 if (diffs++ >= 127) {
3905 command_print(CMD, "More than 128 errors, the rest are not printed.");
3906 free(data);
3907 free(buffer);
3908 goto done;
3911 keep_alive();
3914 free(data);
3916 } else {
3917 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3918 image.sections[i].base_address,
3919 buf_cnt);
3922 free(buffer);
3923 image_size += buf_cnt;
3925 if (diffs > 0)
3926 command_print(CMD, "No more differences found.");
3927 done:
3928 if (diffs > 0)
3929 retval = ERROR_FAIL;
3930 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3931 command_print(CMD, "verified %" PRIu32 " bytes "
3932 "in %fs (%0.3f KiB/s)", image_size,
3933 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3936 image_close(&image);
3938 return retval;
3941 COMMAND_HANDLER(handle_verify_image_checksum_command)
3943 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3946 COMMAND_HANDLER(handle_verify_image_command)
3948 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3951 COMMAND_HANDLER(handle_test_image_command)
3953 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3956 static int handle_bp_command_list(struct command_invocation *cmd)
3958 struct target *target = get_current_target(cmd->ctx);
3959 struct breakpoint *breakpoint = target->breakpoints;
3960 while (breakpoint) {
3961 if (breakpoint->type == BKPT_SOFT) {
3962 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3963 breakpoint->length);
3964 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3965 breakpoint->address,
3966 breakpoint->length,
3967 buf);
3968 free(buf);
3969 } else {
3970 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3971 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3972 breakpoint->asid,
3973 breakpoint->length, breakpoint->number);
3974 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3975 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3976 breakpoint->address,
3977 breakpoint->length, breakpoint->number);
3978 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3979 breakpoint->asid);
3980 } else
3981 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->number);
3986 breakpoint = breakpoint->next;
3988 return ERROR_OK;
3991 static int handle_bp_command_set(struct command_invocation *cmd,
3992 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3994 struct target *target = get_current_target(cmd->ctx);
3995 int retval;
3997 if (asid == 0) {
3998 retval = breakpoint_add(target, addr, length, hw);
3999 /* error is always logged in breakpoint_add(), do not print it again */
4000 if (retval == ERROR_OK)
4001 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4003 } else if (addr == 0) {
4004 if (!target->type->add_context_breakpoint) {
4005 LOG_ERROR("Context breakpoint not available");
4006 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4008 retval = context_breakpoint_add(target, asid, length, hw);
4009 /* error is always logged in context_breakpoint_add(), do not print it again */
4010 if (retval == ERROR_OK)
4011 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4013 } else {
4014 if (!target->type->add_hybrid_breakpoint) {
4015 LOG_ERROR("Hybrid breakpoint not available");
4016 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4018 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4019 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4020 if (retval == ERROR_OK)
4021 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4023 return retval;
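/* Illustrative 'bp' invocations, derived from the argument parsing below
 * (addresses and lengths are examples only):
 *   bp                        -- list all breakpoints
 *   bp 0x20000000 2           -- software breakpoint, length 2
 *   bp 0x20000000 4 hw        -- hardware breakpoint
 *   bp <asid> 4 hw_ctx        -- context breakpoint on an ASID
 *   bp <address> <asid> 4 hw  -- hybrid breakpoint
 */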
4026 COMMAND_HANDLER(handle_bp_command)
4028 target_addr_t addr;
4029 uint32_t asid;
4030 uint32_t length;
4031 int hw = BKPT_SOFT;
4033 switch (CMD_ARGC) {
4034 case 0:
4035 return handle_bp_command_list(CMD);
4037 case 2:
4038 asid = 0;
4039 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4040 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4041 return handle_bp_command_set(CMD, addr, asid, length, hw);
4043 case 3:
4044 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4045 hw = BKPT_HARD;
4046 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4047 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4048 asid = 0;
4049 return handle_bp_command_set(CMD, addr, asid, length, hw);
4050 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 addr = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4057 /* fallthrough */
4058 case 4:
4059 hw = BKPT_HARD;
4060 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4061 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4062 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4063 return handle_bp_command_set(CMD, addr, asid, length, hw);
4065 default:
4066 return ERROR_COMMAND_SYNTAX_ERROR;
4070 COMMAND_HANDLER(handle_rbp_command)
4072 if (CMD_ARGC != 1)
4073 return ERROR_COMMAND_SYNTAX_ERROR;
4075 struct target *target = get_current_target(CMD_CTX);
4077 if (!strcmp(CMD_ARGV[0], "all")) {
4078 breakpoint_remove_all(target);
4079 } else {
4080 target_addr_t addr;
4081 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4083 breakpoint_remove(target, addr);
4086 return ERROR_OK;
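/* Illustrative 'wp' invocations (values are examples only):
 *   wp                             -- list watchpoints
 *   wp 0x20000000 4                -- access watchpoint, 4 bytes
 *   wp 0x20000000 4 w              -- write watchpoint
 *   wp 0x20000000 4 r 0x55 0xff    -- read watchpoint matching value/mask
 */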
4089 COMMAND_HANDLER(handle_wp_command)
4091 struct target *target = get_current_target(CMD_CTX);
4093 if (CMD_ARGC == 0) {
4094 struct watchpoint *watchpoint = target->watchpoints;
4096 while (watchpoint) {
4097 command_print(CMD, "address: " TARGET_ADDR_FMT
4098 ", len: 0x%8.8" PRIx32
4099 ", r/w/a: %i, value: 0x%8.8" PRIx32
4100 ", mask: 0x%8.8" PRIx32,
4101 watchpoint->address,
4102 watchpoint->length,
4103 (int)watchpoint->rw,
4104 watchpoint->value,
4105 watchpoint->mask);
4106 watchpoint = watchpoint->next;
4108 return ERROR_OK;
4111 enum watchpoint_rw type = WPT_ACCESS;
4112 target_addr_t addr = 0;
4113 uint32_t length = 0;
4114 uint32_t data_value = 0x0;
4115 uint32_t data_mask = 0xffffffff;
4117 switch (CMD_ARGC) {
4118 case 5:
4119 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4120 /* fall through */
4121 case 4:
4122 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4123 /* fall through */
4124 case 3:
4125 switch (CMD_ARGV[2][0]) {
4126 case 'r':
4127 type = WPT_READ;
4128 break;
4129 case 'w':
4130 type = WPT_WRITE;
4131 break;
4132 case 'a':
4133 type = WPT_ACCESS;
4134 break;
4135 default:
4136 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4137 return ERROR_COMMAND_SYNTAX_ERROR;
4139 /* fall through */
4140 case 2:
4141 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4142 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4143 break;
4145 default:
4146 return ERROR_COMMAND_SYNTAX_ERROR;
4149 int retval = watchpoint_add(target, addr, length, type,
4150 data_value, data_mask);
4151 if (retval != ERROR_OK)
4152 LOG_ERROR("Failure setting watchpoints");
4154 return retval;
4157 COMMAND_HANDLER(handle_rwp_command)
4159 if (CMD_ARGC != 1)
4160 return ERROR_COMMAND_SYNTAX_ERROR;
4162 target_addr_t addr;
4163 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4165 struct target *target = get_current_target(CMD_CTX);
4166 watchpoint_remove(target, addr);
4168 return ERROR_OK;
4172 * Translate a virtual address to a physical address.
4174 * The low-level target implementation must have logged a detailed error
4175 * which is forwarded to telnet/GDB session.
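*
* Illustrative use from the OpenOCD console (address is an example only):
*   virt2phys 0xc0000000
* prints the corresponding physical address on success.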
4177 COMMAND_HANDLER(handle_virt2phys_command)
4179 if (CMD_ARGC != 1)
4180 return ERROR_COMMAND_SYNTAX_ERROR;
4182 target_addr_t va;
4183 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4184 target_addr_t pa;
4186 struct target *target = get_current_target(CMD_CTX);
4187 int retval = target->type->virt2phys(target, va, &pa);
4188 if (retval == ERROR_OK)
4189 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4191 return retval;
4194 static void write_data(FILE *f, const void *data, size_t len)
4196 size_t written = fwrite(data, 1, len, f);
4197 if (written != len)
4198 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4201 static void write_long(FILE *f, int l, struct target *target)
4203 uint8_t val[4];
4205 target_buffer_set_u32(target, val, l);
4206 write_data(f, val, 4);
4209 static void write_string(FILE *f, char *s)
4211 write_data(f, s, strlen(s));
4214 typedef unsigned char UNIT[2]; /* unit of profiling */
4216 /* Dump a gmon.out histogram file. */
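/* Output layout, matching the write_* calls below: the "gmon" magic and
 * version header, a GMON_TAG_TIME_HIST tag byte, the histogram header
 * (low_pc, high_pc, bucket count, sample rate, "seconds" dimension), then
 * one 16-bit saturating counter per bucket. */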
4217 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4218 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4220 uint32_t i;
4221 FILE *f = fopen(filename, "w");
4222 if (!f)
4223 return;
4224 write_string(f, "gmon");
4225 write_long(f, 0x00000001, target); /* Version */
4226 write_long(f, 0, target); /* padding */
4227 write_long(f, 0, target); /* padding */
4228 write_long(f, 0, target); /* padding */
4230 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4231 write_data(f, &zero, 1);
4233 /* figure out bucket size */
4234 uint32_t min;
4235 uint32_t max;
4236 if (with_range) {
4237 min = start_address;
4238 max = end_address;
4239 } else {
4240 min = samples[0];
4241 max = samples[0];
4242 for (i = 0; i < sample_num; i++) {
4243 if (min > samples[i])
4244 min = samples[i];
4245 if (max < samples[i])
4246 max = samples[i];
4249 /* max should be (largest sample + 1)
4250 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4251 if (max < UINT32_MAX)
4252 max++;
4254 /* gprof requires (max - min) >= 2 */
4255 while ((max - min) < 2) {
4256 if (max < UINT32_MAX)
4257 max++;
4258 else
4259 min--;
4263 uint32_t address_space = max - min;
4265 /* FIXME: What is the reasonable number of buckets?
4266 * The profiling result will be more accurate if there are enough buckets. */
4267 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4268 uint32_t num_buckets = address_space / sizeof(UNIT);
4269 if (num_buckets > max_buckets)
4270 num_buckets = max_buckets;
4271 int *buckets = malloc(sizeof(int) * num_buckets);
4272 if (!buckets) {
4273 fclose(f);
4274 return;
4276 memset(buckets, 0, sizeof(int) * num_buckets);
4277 for (i = 0; i < sample_num; i++) {
4278 uint32_t address = samples[i];
4280 if ((address < min) || (max <= address))
4281 continue;
4283 long long a = address - min;
4284 long long b = num_buckets;
4285 long long c = address_space;
4286 int index_t = (a * b) / c; /* 64-bit intermediates: a * b would overflow int32 */
4287 buckets[index_t]++;
4290 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4291 write_long(f, min, target); /* low_pc */
4292 write_long(f, max, target); /* high_pc */
4293 write_long(f, num_buckets, target); /* # of buckets */
4294 float sample_rate = sample_num / (duration_ms / 1000.0);
4295 write_long(f, sample_rate, target);
4296 write_string(f, "seconds");
4297 for (i = 0; i < (15-strlen("seconds")); i++)
4298 write_data(f, &zero, 1);
4299 write_string(f, "s");
4301 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4303 char *data = malloc(2 * num_buckets);
4304 if (data) {
4305 for (i = 0; i < num_buckets; i++) {
4306 int val;
4307 val = buckets[i];
4308 if (val > 65535)
4309 val = 65535;
4310 data[i * 2] = val&0xff;
4311 data[i * 2 + 1] = (val >> 8) & 0xff;
4313 free(buckets);
4314 write_data(f, data, num_buckets * 2);
4315 free(data);
4316 } else
4317 free(buckets);
4319 fclose(f);
4322 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4323 * which will be used as a random sampling of PC */
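/* Illustrative use (arguments are examples only): 'profile 30 /tmp/gmon.out'
 * forwards the first argument to target_profiling() and writes the resulting
 * histogram; 'profile 30 /tmp/gmon.out 0x08000000 0x08010000' additionally
 * restricts the histogram to the given address range. */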
4324 COMMAND_HANDLER(handle_profile_command)
4326 struct target *target = get_current_target(CMD_CTX);
4328 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4329 return ERROR_COMMAND_SYNTAX_ERROR;
4331 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4332 uint32_t offset;
4333 uint32_t num_of_samples;
4334 int retval = ERROR_OK;
4335 bool halted_before_profiling = target->state == TARGET_HALTED;
4337 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4339 uint32_t start_address = 0;
4340 uint32_t end_address = 0;
4341 bool with_range = false;
4342 if (CMD_ARGC == 4) {
4343 with_range = true;
4344 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4345 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4346 if (start_address > end_address || (end_address - start_address) < 2) {
4347 command_print(CMD, "Error: end address must be at least start address + 2");
4348 return ERROR_COMMAND_ARGUMENT_INVALID;
4352 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4353 if (!samples) {
4354 LOG_ERROR("No memory to store samples.");
4355 return ERROR_FAIL;
4358 uint64_t timestart_ms = timeval_ms();
4360 * Some cores let us sample the PC without the
4361 * annoying halt/resume step; for example, ARMv7 PCSR.
4362 * Provide a way to use that more efficient mechanism.
4364 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4365 &num_of_samples, offset);
4366 if (retval != ERROR_OK) {
4367 free(samples);
4368 return retval;
4370 uint32_t duration_ms = timeval_ms() - timestart_ms;
4372 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4374 retval = target_poll(target);
4375 if (retval != ERROR_OK) {
4376 free(samples);
4377 return retval;
4380 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4381 /* The target was halted before we started and is running now. Halt it,
4382 * for consistency. */
4383 retval = target_halt(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4388 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4389 /* The target was running before we started and is halted now. Resume
4390 * it, for consistency. */
4391 retval = target_resume(target, 1, 0, 0, 0);
4392 if (retval != ERROR_OK) {
4393 free(samples);
4394 return retval;
4398 retval = target_poll(target);
4399 if (retval != ERROR_OK) {
4400 free(samples);
4401 return retval;
4404 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4405 with_range, start_address, end_address, target, duration_ms);
4406 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4408 free(samples);
4409 return retval;
4412 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4414 char *namebuf;
4415 Jim_Obj *obj_name, *obj_val;
4416 int result;
4418 namebuf = alloc_printf("%s(%d)", varname, idx);
4419 if (!namebuf)
4420 return JIM_ERR;
4422 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4423 jim_wide wide_val = val;
4424 obj_val = Jim_NewWideObj(interp, wide_val);
4425 if (!obj_name || !obj_val) {
4426 free(namebuf);
4427 return JIM_ERR;
4430 Jim_IncrRefCount(obj_name);
4431 Jim_IncrRefCount(obj_val);
4432 result = Jim_SetVariable(interp, obj_name, obj_val);
4433 Jim_DecrRefCount(interp, obj_name);
4434 Jim_DecrRefCount(interp, obj_val);
4435 free(namebuf);
4436 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4437 return result;
4440 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4442 int e;
4444 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4446 /* argv[0] = name of array to receive the data
4447 * argv[1] = desired element width in bits
4448 * argv[2] = memory address
4449 * argv[3] = count of times to read
4450 * argv[4] = optional "phys"
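*
* Illustrative use (values are examples only):
*   mem2array readings 32 0x20000000 8
* fills readings(0)..readings(7) with 32-bit words read from 0x20000000.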
4452 if (argc < 4 || argc > 5) {
4453 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4454 return JIM_ERR;
4457 /* Arg 0: Name of the array variable */
4458 const char *varname = Jim_GetString(argv[0], NULL);
4460 /* Arg 1: Bit width of one element */
4461 long l;
4462 e = Jim_GetLong(interp, argv[1], &l);
4463 if (e != JIM_OK)
4464 return e;
4465 const unsigned int width_bits = l;
4467 if (width_bits != 8 &&
4468 width_bits != 16 &&
4469 width_bits != 32 &&
4470 width_bits != 64) {
4471 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4472 Jim_AppendStrings(interp, Jim_GetResult(interp),
4473 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4474 return JIM_ERR;
4476 const unsigned int width = width_bits / 8;
4478 /* Arg 2: Memory address */
4479 jim_wide wide_addr;
4480 e = Jim_GetWide(interp, argv[2], &wide_addr);
4481 if (e != JIM_OK)
4482 return e;
4483 target_addr_t addr = (target_addr_t)wide_addr;
4485 /* Arg 3: Number of elements to read */
4486 e = Jim_GetLong(interp, argv[3], &l);
4487 if (e != JIM_OK)
4488 return e;
4489 size_t len = l;
4491 /* Arg 4: phys */
4492 bool is_phys = false;
4493 if (argc > 4) {
4494 int str_len = 0;
4495 const char *phys = Jim_GetString(argv[4], &str_len);
4496 if (!strncmp(phys, "phys", str_len))
4497 is_phys = true;
4498 else
4499 return JIM_ERR;
4502 /* Argument checks */
4503 if (len == 0) {
4504 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4505 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4506 return JIM_ERR;
4508 if ((addr + (len * width)) < addr) {
4509 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4510 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
4511 return JIM_ERR;
4513 if (len > 65536) {
4514 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4515 Jim_AppendStrings(interp, Jim_GetResult(interp),
4516 "mem2array: too large read request, exceeds 64K items", NULL);
4517 return JIM_ERR;
4520 if ((width == 1) ||
4521 ((width == 2) && ((addr & 1) == 0)) ||
4522 ((width == 4) && ((addr & 3) == 0)) ||
4523 ((width == 8) && ((addr & 7) == 0))) {
4524 /* alignment correct */
4525 } else {
4526 char buf[100];
4527 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4528 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4529 addr,
4530 width);
4531 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4532 return JIM_ERR;
4535 /* Transfer loop */
4537 /* index counter */
4538 size_t idx = 0;
4540 const size_t buffersize = 4096;
4541 uint8_t *buffer = malloc(buffersize);
4542 if (!buffer)
4543 return JIM_ERR;
4545 /* assume ok */
4546 e = JIM_OK;
4547 while (len) {
4548 /* Slurp... in buffer size chunks */
4549 const unsigned int max_chunk_len = buffersize / width;
4550 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4552 int retval;
4553 if (is_phys)
4554 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4555 else
4556 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4557 if (retval != ERROR_OK) {
4558 /* BOO !*/
4559 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4560 addr,
4561 width,
4562 chunk_len);
4563 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4564 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4565 e = JIM_ERR;
4566 break;
4567 } else {
4568 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4569 uint64_t v = 0;
4570 switch (width) {
4571 case 8:
4572 v = target_buffer_get_u64(target, &buffer[i*width]);
4573 break;
4574 case 4:
4575 v = target_buffer_get_u32(target, &buffer[i*width]);
4576 break;
4577 case 2:
4578 v = target_buffer_get_u16(target, &buffer[i*width]);
4579 break;
4580 case 1:
4581 v = buffer[i] & 0x0ff;
4582 break;
4584 new_u64_array_element(interp, varname, idx, v);
4586 len -= chunk_len;
4587 addr += chunk_len * width;
4591 free(buffer);
4593 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4595 return e;
4598 COMMAND_HANDLER(handle_target_read_memory)
4601 * CMD_ARGV[0] = memory address
4602 * CMD_ARGV[1] = desired element width in bits
4603 * CMD_ARGV[2] = number of elements to read
4604 * CMD_ARGV[3] = optional "phys"
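*
* Illustrative use (values are examples only):
*   read_memory 0x20000000 32 4
* returns a Tcl list of four 32-bit values; append 'phys' to force a
* physical-address access.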
4607 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4608 return ERROR_COMMAND_SYNTAX_ERROR;
4610 /* Arg 1: Memory address. */
4611 target_addr_t addr;
4612 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4614 /* Arg 2: Bit width of one element. */
4615 unsigned int width_bits;
4616 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4618 /* Arg 3: Number of elements to read. */
4619 unsigned int count;
4620 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4622 /* Arg 4: Optional 'phys'. */
4623 bool is_phys = false;
4624 if (CMD_ARGC == 4) {
4625 if (strcmp(CMD_ARGV[3], "phys")) {
4626 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4627 return ERROR_COMMAND_ARGUMENT_INVALID;
4630 is_phys = true;
4633 switch (width_bits) {
4634 case 8:
4635 case 16:
4636 case 32:
4637 case 64:
4638 break;
4639 default:
4640 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4641 return ERROR_COMMAND_ARGUMENT_INVALID;
4644 const unsigned int width = width_bits / 8;
4646 if ((addr + (count * width)) < addr) {
4647 command_print(CMD, "read_memory: addr + count wraps to zero");
4648 return ERROR_COMMAND_ARGUMENT_INVALID;
4651 if (count > 65536) {
4652 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4653 return ERROR_COMMAND_ARGUMENT_INVALID;
4656 struct target *target = get_current_target(CMD_CTX);
4658 const size_t buffersize = 4096;
4659 uint8_t *buffer = malloc(buffersize);
4661 if (!buffer) {
4662 LOG_ERROR("Failed to allocate memory");
4663 return ERROR_FAIL;
4666 char *separator = "";
4667 while (count > 0) {
4668 const unsigned int max_chunk_len = buffersize / width;
4669 const size_t chunk_len = MIN(count, max_chunk_len);
4671 int retval;
4673 if (is_phys)
4674 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4675 else
4676 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4678 if (retval != ERROR_OK) {
4679 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4680 addr, width_bits, chunk_len);
4682 * FIXME: we append the errmsg to the list of values already read.
4683 * Add a way to flush and replace the old output; keep the details in LOG_DEBUG()
4685 command_print(CMD, "read_memory: failed to read memory");
4686 free(buffer);
4687 return retval;
4690 for (size_t i = 0; i < chunk_len ; i++) {
4691 uint64_t v = 0;
4693 switch (width) {
4694 case 8:
4695 v = target_buffer_get_u64(target, &buffer[i * width]);
4696 break;
4697 case 4:
4698 v = target_buffer_get_u32(target, &buffer[i * width]);
4699 break;
4700 case 2:
4701 v = target_buffer_get_u16(target, &buffer[i * width]);
4702 break;
4703 case 1:
4704 v = buffer[i];
4705 break;
4708 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4709 separator = " ";
4712 count -= chunk_len;
4713 addr += chunk_len * width;
4716 free(buffer);
4718 return ERROR_OK;
4721 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4723 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4724 if (!namebuf)
4725 return JIM_ERR;
4727 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4728 if (!obj_name) {
4729 free(namebuf);
4730 return JIM_ERR;
4733 Jim_IncrRefCount(obj_name);
4734 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4735 Jim_DecrRefCount(interp, obj_name);
4736 free(namebuf);
4737 if (!obj_val)
4738 return JIM_ERR;
4740 jim_wide wide_val;
4741 int result = Jim_GetWide(interp, obj_val, &wide_val);
4742 *val = wide_val;
4743 return result;
4746 static int target_array2mem(Jim_Interp *interp, struct target *target,
4747 int argc, Jim_Obj *const *argv)
4749 int e;
4751 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4753 /* argv[0] = name of array from which to read the data
4754 * argv[1] = desired element width in bits
4755 * argv[2] = memory address
4756 * argv[3] = number of elements to write
4757 * argv[4] = optional "phys"
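*
* Illustrative use (values are examples only):
*   set data(0) 0x11; set data(1) 0x22
*   array2mem data 32 0x20000000 2
* writes the two 32-bit values to target memory at 0x20000000.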
4759 if (argc < 4 || argc > 5) {
4760 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4761 return JIM_ERR;
4764 /* Arg 0: Name of the array variable */
4765 const char *varname = Jim_GetString(argv[0], NULL);
4767 /* Arg 1: Bit width of one element */
4768 long l;
4769 e = Jim_GetLong(interp, argv[1], &l);
4770 if (e != JIM_OK)
4771 return e;
4772 const unsigned int width_bits = l;
4774 if (width_bits != 8 &&
4775 width_bits != 16 &&
4776 width_bits != 32 &&
4777 width_bits != 64) {
4778 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4779 Jim_AppendStrings(interp, Jim_GetResult(interp),
4780 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4781 return JIM_ERR;
4783 const unsigned int width = width_bits / 8;
4785 /* Arg 2: Memory address */
4786 jim_wide wide_addr;
4787 e = Jim_GetWide(interp, argv[2], &wide_addr);
4788 if (e != JIM_OK)
4789 return e;
4790 target_addr_t addr = (target_addr_t)wide_addr;
4792 /* Arg 3: Number of elements to write */
4793 e = Jim_GetLong(interp, argv[3], &l);
4794 if (e != JIM_OK)
4795 return e;
4796 size_t len = l;
4798 /* Arg 4: Phys */
4799 bool is_phys = false;
4800 if (argc > 4) {
4801 int str_len = 0;
4802 const char *phys = Jim_GetString(argv[4], &str_len);
4803 if (!strncmp(phys, "phys", str_len))
4804 is_phys = true;
4805 else
4806 return JIM_ERR;
4809 /* Argument checks */
4810 if (len == 0) {
4811 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4812 Jim_AppendStrings(interp, Jim_GetResult(interp),
4813 "array2mem: zero width read?", NULL);
4814 return JIM_ERR;
4817 if ((addr + (len * width)) < addr) {
4818 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4819 Jim_AppendStrings(interp, Jim_GetResult(interp),
4820 "array2mem: addr + len - wraps to zero?", NULL);
4821 return JIM_ERR;
4824 if (len > 65536) {
4825 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4826 Jim_AppendStrings(interp, Jim_GetResult(interp),
4827 "array2mem: too large memory write request, exceeds 64K items", NULL);
4828 return JIM_ERR;
4831 if ((width == 1) ||
4832 ((width == 2) && ((addr & 1) == 0)) ||
4833 ((width == 4) && ((addr & 3) == 0)) ||
4834 ((width == 8) && ((addr & 7) == 0))) {
4835 /* alignment correct */
4836 } else {
4837 char buf[100];
4838 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4839 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4840 addr,
4841 width);
4842 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4843 return JIM_ERR;
4846 /* Transfer loop */
4848 /* assume ok */
4849 e = JIM_OK;
4851 const size_t buffersize = 4096;
4852 uint8_t *buffer = malloc(buffersize);
4853 if (!buffer)
4854 return JIM_ERR;
4856 /* index counter */
4857 size_t idx = 0;
4859 while (len) {
4860 /* Slurp... in buffer size chunks */
4861 const unsigned int max_chunk_len = buffersize / width;
4863 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4865 /* Fill the buffer */
4866 for (size_t i = 0; i < chunk_len; i++, idx++) {
4867 uint64_t v = 0;
4868 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4869 free(buffer);
4870 return JIM_ERR;
4872 switch (width) {
4873 case 8:
4874 target_buffer_set_u64(target, &buffer[i * width], v);
4875 break;
4876 case 4:
4877 target_buffer_set_u32(target, &buffer[i * width], v);
4878 break;
4879 case 2:
4880 target_buffer_set_u16(target, &buffer[i * width], v);
4881 break;
4882 case 1:
4883 buffer[i] = v & 0x0ff;
4884 break;
4887 len -= chunk_len;
4889 /* Write the buffer to memory */
4890 int retval;
4891 if (is_phys)
4892 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4893 else
4894 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4895 if (retval != ERROR_OK) {
4896 /* BOO !*/
4897 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4898 addr,
4899 width,
4900 chunk_len);
4901 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4902 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4903 e = JIM_ERR;
4904 break;
4906 addr += chunk_len * width;
4909 free(buffer);
4911 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4913 return e;
4916 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4917 Jim_Obj * const *argv)
4920 * argv[1] = memory address
4921 * argv[2] = desired element width in bits
4922 * argv[3] = list of data to write
4923 * argv[4] = optional "phys"
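*
* Illustrative use (values are examples only):
*   write_memory 0x20000000 32 {0xdeadbeef 0x00ff00ff}
* writes two 32-bit words; append 'phys' to force a physical-address access.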
4926 if (argc < 4 || argc > 5) {
4927 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4928 return JIM_ERR;
4931 /* Arg 1: Memory address. */
4932 int e;
4933 jim_wide wide_addr;
4934 e = Jim_GetWide(interp, argv[1], &wide_addr);
4936 if (e != JIM_OK)
4937 return e;
4939 target_addr_t addr = (target_addr_t)wide_addr;
4941 /* Arg 2: Bit width of one element. */
4942 long l;
4943 e = Jim_GetLong(interp, argv[2], &l);
4945 if (e != JIM_OK)
4946 return e;
4948 const unsigned int width_bits = l;
4949 size_t count = Jim_ListLength(interp, argv[3]);
4951 /* Arg 4: Optional 'phys'. */
4952 bool is_phys = false;
4954 if (argc > 4) {
4955 const char *phys = Jim_GetString(argv[4], NULL);
4957 if (strcmp(phys, "phys")) {
4958 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4959 return JIM_ERR;
4962 is_phys = true;
4965 switch (width_bits) {
4966 case 8:
4967 case 16:
4968 case 32:
4969 case 64:
4970 break;
4971 default:
4972 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4973 return JIM_ERR;
4976 const unsigned int width = width_bits / 8;
4978 if ((addr + (count * width)) < addr) {
4979 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4980 return JIM_ERR;
4983 if (count > 65536) {
4984 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4985 return JIM_ERR;
4988 struct command_context *cmd_ctx = current_command_context(interp);
4989 assert(cmd_ctx != NULL);
4990 struct target *target = get_current_target(cmd_ctx);
4992 const size_t buffersize = 4096;
4993 uint8_t *buffer = malloc(buffersize);
4995 if (!buffer) {
4996 LOG_ERROR("Failed to allocate memory");
4997 return JIM_ERR;
5000 size_t j = 0;
5002 while (count > 0) {
5003 const unsigned int max_chunk_len = buffersize / width;
5004 const size_t chunk_len = MIN(count, max_chunk_len);
5006 for (size_t i = 0; i < chunk_len; i++, j++) {
5007 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5008 jim_wide element_wide;
5009 Jim_GetWide(interp, tmp, &element_wide);
5011 const uint64_t v = element_wide;
5013 switch (width) {
5014 case 8:
5015 target_buffer_set_u64(target, &buffer[i * width], v);
5016 break;
5017 case 4:
5018 target_buffer_set_u32(target, &buffer[i * width], v);
5019 break;
5020 case 2:
5021 target_buffer_set_u16(target, &buffer[i * width], v);
5022 break;
5023 case 1:
5024 buffer[i] = v & 0x0ff;
5025 break;
5029 count -= chunk_len;
5031 int retval;
5033 if (is_phys)
5034 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5035 else
5036 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5038 if (retval != ERROR_OK) {
5039 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5040 addr, width_bits, chunk_len);
5041 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5042 e = JIM_ERR;
5043 break;
5046 addr += chunk_len * width;
5049 free(buffer);
5051 return e;
5054 /* FIX? should we propagate errors here rather than printing them
5055 * and continuing?
5057 void target_handle_event(struct target *target, enum target_event e)
5059 struct target_event_action *teap;
5060 int retval;
5062 for (teap = target->event_action; teap; teap = teap->next) {
5063 if (teap->event == e) {
5064 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5065 target->target_number,
5066 target_name(target),
5067 target_type_name(target),
5069 target_event_name(e),
5070 Jim_GetString(teap->body, NULL));
5072 /* Override current target by the target an event
5073 * is issued from (lot of scripts need it).
5074 * Return back to previous override as soon
5075 * as the handler processing is done */
5076 struct command_context *cmd_ctx = current_command_context(teap->interp);
5077 struct target *saved_target_override = cmd_ctx->current_target_override;
5078 cmd_ctx->current_target_override = target;
5080 retval = Jim_EvalObj(teap->interp, teap->body);
5082 cmd_ctx->current_target_override = saved_target_override;
5084 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5085 return;
5087 if (retval == JIM_RETURN)
5088 retval = teap->interp->returnCode;
5090 if (retval != JIM_OK) {
5091 Jim_MakeErrorMessage(teap->interp);
5092 LOG_USER("Error executing event %s on target %s:\n%s",
5093 target_event_name(e),
5094 target_name(target),
5095 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5096 /* clean both error code and stacktrace before return */
5097 Jim_Eval(teap->interp, "error \"\" \"\"");
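/* Illustrative 'get_reg' use (register names are examples only):
 *   get_reg {pc sp}          -- returns a dict such as {pc 0x... sp 0x...}
 *   get_reg -force {pc}      -- re-reads the register from the target
 *                               instead of using the cached value
 */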
5103 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5104 Jim_Obj * const *argv)
5106 bool force = false;
5108 if (argc == 3) {
5109 const char *option = Jim_GetString(argv[1], NULL);
5111 if (!strcmp(option, "-force")) {
5112 argc--;
5113 argv++;
5114 force = true;
5115 } else {
5116 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5117 return JIM_ERR;
5121 if (argc != 2) {
5122 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5123 return JIM_ERR;
5126 const int length = Jim_ListLength(interp, argv[1]);
5128 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5130 if (!result_dict)
5131 return JIM_ERR;
5133 struct command_context *cmd_ctx = current_command_context(interp);
5134 assert(cmd_ctx != NULL);
5135 const struct target *target = get_current_target(cmd_ctx);
5137 for (int i = 0; i < length; i++) {
5138 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5140 if (!elem)
5141 return JIM_ERR;
5143 const char *reg_name = Jim_String(elem);
5145 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5146 false);
5148 if (!reg || !reg->exist) {
5149 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5150 return JIM_ERR;
5153 if (force) {
5154 int retval = reg->type->get(reg);
5156 if (retval != ERROR_OK) {
5157 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5158 reg_name);
5159 return JIM_ERR;
5163 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5165 if (!reg_value) {
5166 LOG_ERROR("Failed to allocate memory");
5167 return JIM_ERR;
5170 char *tmp = alloc_printf("0x%s", reg_value);
5172 free(reg_value);
5174 if (!tmp) {
5175 LOG_ERROR("Failed to allocate memory");
5176 return JIM_ERR;
5179 Jim_DictAddElement(interp, result_dict, elem,
5180 Jim_NewStringObj(interp, tmp, -1));
5182 free(tmp);
5185 Jim_SetResult(interp, result_dict);
5187 return JIM_OK;
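/* Illustrative 'set_reg' use (names and values are examples only):
 *   set_reg {pc 0x08000000 sp 0x20002000}
 * writes each value to the named register via the register's set() handler.
 */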
5190 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5191 Jim_Obj * const *argv)
5193 if (argc != 2) {
5194 Jim_WrongNumArgs(interp, 1, argv, "dict");
5195 return JIM_ERR;
5198 int tmp;
5199 #if JIM_VERSION >= 80
5200 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5202 if (!dict)
5203 return JIM_ERR;
5204 #else
5205 Jim_Obj **dict;
5206 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5208 if (ret != JIM_OK)
5209 return ret;
5210 #endif
5212 const unsigned int length = tmp;
5213 struct command_context *cmd_ctx = current_command_context(interp);
5214 assert(cmd_ctx);
5215 const struct target *target = get_current_target(cmd_ctx);
5217 for (unsigned int i = 0; i < length; i += 2) {
5218 const char *reg_name = Jim_String(dict[i]);
5219 const char *reg_value = Jim_String(dict[i + 1]);
5220 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5221 false);
5223 if (!reg || !reg->exist) {
5224 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5225 return JIM_ERR;
5228 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5230 if (!buf) {
5231 LOG_ERROR("Failed to allocate memory");
5232 return JIM_ERR;
5235 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5236 int retval = reg->type->set(reg, buf);
5237 free(buf);
5239 if (retval != ERROR_OK) {
5240 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5241 reg_value, reg_name);
5242 return JIM_ERR;
5246 return JIM_OK;
5250 * Returns true only if the target has a handler for the specified event.
5252 bool target_has_event_action(struct target *target, enum target_event event)
5254 struct target_event_action *teap;
5256 for (teap = target->event_action; teap; teap = teap->next) {
5257 if (teap->event == event)
5258 return true;
5260 return false;
5263 enum target_cfg_param {
5264 TCFG_TYPE,
5265 TCFG_EVENT,
5266 TCFG_WORK_AREA_VIRT,
5267 TCFG_WORK_AREA_PHYS,
5268 TCFG_WORK_AREA_SIZE,
5269 TCFG_WORK_AREA_BACKUP,
5270 TCFG_ENDIAN,
5271 TCFG_COREID,
5272 TCFG_CHAIN_POSITION,
5273 TCFG_DBGBASE,
5274 TCFG_RTOS,
5275 TCFG_DEFER_EXAMINE,
5276 TCFG_GDB_PORT,
5277 TCFG_GDB_MAX_CONNECTIONS,
5280 static struct jim_nvp nvp_config_opts[] = {
5281 { .name = "-type", .value = TCFG_TYPE },
5282 { .name = "-event", .value = TCFG_EVENT },
5283 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5284 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5285 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5286 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5287 { .name = "-endian", .value = TCFG_ENDIAN },
5288 { .name = "-coreid", .value = TCFG_COREID },
5289 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5290 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5291 { .name = "-rtos", .value = TCFG_RTOS },
5292 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5293 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5294 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5295 { .name = NULL, .value = -1 }
5298 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5300 struct jim_nvp *n;
5301 Jim_Obj *o;
5302 jim_wide w;
5303 int e;
5305 /* parse config or cget options ... */
5306 while (goi->argc > 0) {
5307 Jim_SetEmptyResult(goi->interp);
5308 /* jim_getopt_debug(goi); */
5310 if (target->type->target_jim_configure) {
5311 /* target defines a configure function */
5312 /* target gets first dibs on parameters */
5313 e = (*(target->type->target_jim_configure))(target, goi);
5314 if (e == JIM_OK) {
5315 /* more? */
5316 continue;
5318 if (e == JIM_ERR) {
5319 /* An error */
5320 return e;
5322 /* otherwise we 'continue' below */
5324 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5325 if (e != JIM_OK) {
5326 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5327 return e;
5329 switch (n->value) {
5330 case TCFG_TYPE:
5331 /* not settable */
5332 if (goi->isconfigure) {
5333 Jim_SetResultFormatted(goi->interp,
5334 "not settable: %s", n->name);
5335 return JIM_ERR;
5336 } else {
5337 no_params:
5338 if (goi->argc != 0) {
5339 Jim_WrongNumArgs(goi->interp,
5340 goi->argc, goi->argv,
5341 "NO PARAMS");
5342 return JIM_ERR;
5345 Jim_SetResultString(goi->interp,
5346 target_type_name(target), -1);
5347 /* loop for more */
5348 break;
5349 case TCFG_EVENT:
5350 if (goi->argc == 0) {
5351 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5352 return JIM_ERR;
5355 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5356 if (e != JIM_OK) {
5357 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5358 return e;
5361 if (goi->isconfigure) {
5362 if (goi->argc != 1) {
5363 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5364 return JIM_ERR;
5366 } else {
5367 if (goi->argc != 0) {
5368 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5369 return JIM_ERR;
5374 struct target_event_action *teap;
5376 teap = target->event_action;
5377 /* replace existing? */
5378 while (teap) {
5379 if (teap->event == (enum target_event)n->value)
5380 break;
5381 teap = teap->next;
5384 if (goi->isconfigure) {
5385 /* START_DEPRECATED_TPIU */
5386 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5387 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5388 /* END_DEPRECATED_TPIU */
5390 bool replace = true;
5391 if (!teap) {
5392 /* create new */
5393 teap = calloc(1, sizeof(*teap));
5394 replace = false;
5396 teap->event = n->value;
5397 teap->interp = goi->interp;
5398 jim_getopt_obj(goi, &o);
5399 if (teap->body)
5400 Jim_DecrRefCount(teap->interp, teap->body);
5401 teap->body = Jim_DuplicateObj(goi->interp, o);
5403 * FIXME:
5404 * Tcl/TK - "tk events" have a nice feature.
5405 * See the "BIND" command.
5406 * We should support that here.
5407 * You can specify %X and %Y in the event code.
5408 * Placeholders: %T - target name, %N - target number, %E - event name.
5412 Jim_IncrRefCount(teap->body);
5414 if (!replace) {
5415 /* add to head of event list */
5416 teap->next = target->event_action;
5417 target->event_action = teap;
5419 Jim_SetEmptyResult(goi->interp);
5420 } else {
5421 /* get */
5422 if (!teap)
5423 Jim_SetEmptyResult(goi->interp);
5424 else
5425 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5428 /* loop for more */
5429 break;
5431 case TCFG_WORK_AREA_VIRT:
5432 if (goi->isconfigure) {
5433 target_free_all_working_areas(target);
5434 e = jim_getopt_wide(goi, &w);
5435 if (e != JIM_OK)
5436 return e;
5437 target->working_area_virt = w;
5438 target->working_area_virt_spec = true;
5439 } else {
5440 if (goi->argc != 0)
5441 goto no_params;
5443 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5444 /* loop for more */
5445 break;
5447 case TCFG_WORK_AREA_PHYS:
5448 if (goi->isconfigure) {
5449 target_free_all_working_areas(target);
5450 e = jim_getopt_wide(goi, &w);
5451 if (e != JIM_OK)
5452 return e;
5453 target->working_area_phys = w;
5454 target->working_area_phys_spec = true;
5455 } else {
5456 if (goi->argc != 0)
5457 goto no_params;
5459 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5460 /* loop for more */
5461 break;
5463 case TCFG_WORK_AREA_SIZE:
5464 if (goi->isconfigure) {
5465 target_free_all_working_areas(target);
5466 e = jim_getopt_wide(goi, &w);
5467 if (e != JIM_OK)
5468 return e;
5469 target->working_area_size = w;
5470 } else {
5471 if (goi->argc != 0)
5472 goto no_params;
5474 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5475 /* loop for more */
5476 break;
5478 case TCFG_WORK_AREA_BACKUP:
5479 if (goi->isconfigure) {
5480 target_free_all_working_areas(target);
5481 e = jim_getopt_wide(goi, &w);
5482 if (e != JIM_OK)
5483 return e;
5484 /* make this exactly 1 or 0 */
5485 target->backup_working_area = (!!w);
5486 } else {
5487 if (goi->argc != 0)
5488 goto no_params;
5490 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5491 /* loop for more */
5492 break;
5495 case TCFG_ENDIAN:
5496 if (goi->isconfigure) {
5497 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5498 if (e != JIM_OK) {
5499 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5500 return e;
5502 target->endianness = n->value;
5503 } else {
5504 if (goi->argc != 0)
5505 goto no_params;
5507 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5508 if (!n->name) {
5509 target->endianness = TARGET_LITTLE_ENDIAN;
5510 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5512 Jim_SetResultString(goi->interp, n->name, -1);
5513 /* loop for more */
5514 break;
5516 case TCFG_COREID:
5517 if (goi->isconfigure) {
5518 e = jim_getopt_wide(goi, &w);
5519 if (e != JIM_OK)
5520 return e;
5521 target->coreid = (int32_t)w;
5522 } else {
5523 if (goi->argc != 0)
5524 goto no_params;
5526 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5527 /* loop for more */
5528 break;
5530 case TCFG_CHAIN_POSITION:
5531 if (goi->isconfigure) {
5532 Jim_Obj *o_t;
5533 struct jtag_tap *tap;
5535 if (target->has_dap) {
5536 Jim_SetResultString(goi->interp,
5537 "target requires -dap parameter instead of -chain-position!", -1);
5538 return JIM_ERR;
5541 target_free_all_working_areas(target);
5542 e = jim_getopt_obj(goi, &o_t);
5543 if (e != JIM_OK)
5544 return e;
5545 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5546 if (!tap)
5547 return JIM_ERR;
5548 target->tap = tap;
5549 target->tap_configured = true;
5550 } else {
5551 if (goi->argc != 0)
5552 goto no_params;
5554 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5555 /* loop for more */
5556 break;
5557 case TCFG_DBGBASE:
5558 if (goi->isconfigure) {
5559 e = jim_getopt_wide(goi, &w);
5560 if (e != JIM_OK)
5561 return e;
5562 target->dbgbase = (uint32_t)w;
5563 target->dbgbase_set = true;
5564 } else {
5565 if (goi->argc != 0)
5566 goto no_params;
5568 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5569 /* loop for more */
5570 break;
5571 case TCFG_RTOS:
5572 /* RTOS */
5574 int result = rtos_create(goi, target);
5575 if (result != JIM_OK)
5576 return result;
5578 /* loop for more */
5579 break;
5581 case TCFG_DEFER_EXAMINE:
5582 /* DEFER_EXAMINE */
5583 target->defer_examine = true;
5584 /* loop for more */
5585 break;
5587 case TCFG_GDB_PORT:
5588 if (goi->isconfigure) {
5589 struct command_context *cmd_ctx = current_command_context(goi->interp);
5590 if (cmd_ctx->mode != COMMAND_CONFIG) {
5591 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5592 return JIM_ERR;
5595 const char *s;
5596 e = jim_getopt_string(goi, &s, NULL);
5597 if (e != JIM_OK)
5598 return e;
5599 free(target->gdb_port_override);
5600 target->gdb_port_override = strdup(s);
5601 } else {
5602 if (goi->argc != 0)
5603 goto no_params;
5605 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5606 /* loop for more */
5607 break;
5609 case TCFG_GDB_MAX_CONNECTIONS:
5610 if (goi->isconfigure) {
5611 struct command_context *cmd_ctx = current_command_context(goi->interp);
5612 if (cmd_ctx->mode != COMMAND_CONFIG) {
5613 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5614 return JIM_ERR;
5617 e = jim_getopt_wide(goi, &w);
5618 if (e != JIM_OK)
5619 return e;
5620 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5621 } else {
5622 if (goi->argc != 0)
5623 goto no_params;
5625 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5626 break;
5628 } /* while (goi->argc) */
5631 /* done - we return */
5632 return JIM_OK;
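/* Illustrative use on a target command (name and values are examples only):
 *   mytarget configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   mytarget cget -work-area-size
 * 'configure' sets options from nvp_config_opts above; 'cget' reads one back.
 */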
5635 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5637 struct command *c = jim_to_command(interp);
5638 struct jim_getopt_info goi;
5640 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5641 goi.isconfigure = !strcmp(c->name, "configure");
5642 if (goi.argc < 1) {
5643 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5644 "missing: -option ...");
5645 return JIM_ERR;
5647 struct command_context *cmd_ctx = current_command_context(interp);
5648 assert(cmd_ctx);
5649 struct target *target = get_current_target(cmd_ctx);
5650 return target_configure(&goi, target);
5653 static int jim_target_mem2array(Jim_Interp *interp,
5654 int argc, Jim_Obj *const *argv)
5656 struct command_context *cmd_ctx = current_command_context(interp);
5657 assert(cmd_ctx);
5658 struct target *target = get_current_target(cmd_ctx);
5659 return target_mem2array(interp, target, argc - 1, argv + 1);
5662 static int jim_target_array2mem(Jim_Interp *interp,
5663 int argc, Jim_Obj *const *argv)
5665 struct command_context *cmd_ctx = current_command_context(interp);
5666 assert(cmd_ctx);
5667 struct target *target = get_current_target(cmd_ctx);
5668 return target_array2mem(interp, target, argc - 1, argv + 1);
5671 static int jim_target_tap_disabled(Jim_Interp *interp)
5673 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5674 return JIM_ERR;
5677 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5679 bool allow_defer = false;
5681 struct jim_getopt_info goi;
5682 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5683 if (goi.argc > 1) {
5684 const char *cmd_name = Jim_GetString(argv[0], NULL);
5685 Jim_SetResultFormatted(goi.interp,
5686 "usage: %s ['allow-defer']", cmd_name);
5687 return JIM_ERR;
5689 if (goi.argc > 0 &&
5690 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5691 /* consume it */
5692 Jim_Obj *obj;
5693 int e = jim_getopt_obj(&goi, &obj);
5694 if (e != JIM_OK)
5695 return e;
5696 allow_defer = true;
5699 struct command_context *cmd_ctx = current_command_context(interp);
5700 assert(cmd_ctx);
5701 struct target *target = get_current_target(cmd_ctx);
5702 if (!target->tap->enabled)
5703 return jim_target_tap_disabled(interp);
5705 if (allow_defer && target->defer_examine) {
5706 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5707 LOG_INFO("Use arp_examine command to examine it manually!");
5708 return JIM_OK;
5711 int e = target->type->examine(target);
5712 if (e != ERROR_OK) {
5713 target_reset_examined(target);
5714 return JIM_ERR;
5717 target_set_examined(target);
5719 return JIM_OK;
5722 COMMAND_HANDLER(handle_target_was_examined)
5724 if (CMD_ARGC != 0)
5725 return ERROR_COMMAND_SYNTAX_ERROR;
5727 struct target *target = get_current_target(CMD_CTX);
5729 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5731 return ERROR_OK;
5734 COMMAND_HANDLER(handle_target_examine_deferred)
5736 if (CMD_ARGC != 0)
5737 return ERROR_COMMAND_SYNTAX_ERROR;
5739 struct target *target = get_current_target(CMD_CTX);
5741 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5743 return ERROR_OK;
5746 COMMAND_HANDLER(handle_target_halt_gdb)
5748 if (CMD_ARGC != 0)
5749 return ERROR_COMMAND_SYNTAX_ERROR;
5751 struct target *target = get_current_target(CMD_CTX);
5753 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5756 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5758 if (argc != 1) {
5759 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5760 return JIM_ERR;
5762 struct command_context *cmd_ctx = current_command_context(interp);
5763 assert(cmd_ctx);
5764 struct target *target = get_current_target(cmd_ctx);
5765 if (!target->tap->enabled)
5766 return jim_target_tap_disabled(interp);
5768 int e;
5769 if (!(target_was_examined(target)))
5770 e = ERROR_TARGET_NOT_EXAMINED;
5771 else
5772 e = target->type->poll(target);
5773 if (e != ERROR_OK)
5774 return JIM_ERR;
5775 return JIM_OK;
5778 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5780 struct jim_getopt_info goi;
5781 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5783 if (goi.argc != 2) {
5784 Jim_WrongNumArgs(interp, 0, argv,
5785 "([tT]|[fF]|assert|deassert) BOOL");
5786 return JIM_ERR;
5789 struct jim_nvp *n;
5790 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5791 if (e != JIM_OK) {
5792 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5793 return e;
5795 /* the halt or not param */
5796 jim_wide a;
5797 e = jim_getopt_wide(&goi, &a);
5798 if (e != JIM_OK)
5799 return e;
5801 struct command_context *cmd_ctx = current_command_context(interp);
5802 assert(cmd_ctx);
5803 struct target *target = get_current_target(cmd_ctx);
5804 if (!target->tap->enabled)
5805 return jim_target_tap_disabled(interp);
5807 if (!target->type->assert_reset || !target->type->deassert_reset) {
5808 Jim_SetResultFormatted(interp,
5809 "No target-specific reset for %s",
5810 target_name(target));
5811 return JIM_ERR;
5814 if (target->defer_examine)
5815 target_reset_examined(target);
5817 /* determine if we should halt or not. */
5818 target->reset_halt = (a != 0);
5819 /* When this happens - all workareas are invalid. */
5820 target_free_all_working_areas_restore(target, 0);
5822 /* do the assert */
5823 if (n->value == NVP_ASSERT)
5824 e = target->type->assert_reset(target);
5825 else
5826 e = target->type->deassert_reset(target);
5827 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5830 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5832 if (argc != 1) {
5833 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5834 return JIM_ERR;
5836 struct command_context *cmd_ctx = current_command_context(interp);
5837 assert(cmd_ctx);
5838 struct target *target = get_current_target(cmd_ctx);
5839 if (!target->tap->enabled)
5840 return jim_target_tap_disabled(interp);
5841 int e = target->type->halt(target);
5842 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5845 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5847 struct jim_getopt_info goi;
5848 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5850 /* params: <name> statename timeoutmsecs */
5851 if (goi.argc != 2) {
5852 const char *cmd_name = Jim_GetString(argv[0], NULL);
5853 Jim_SetResultFormatted(goi.interp,
5854 "%s <state_name> <timeout_in_msec>", cmd_name);
5855 return JIM_ERR;
5858 struct jim_nvp *n;
5859 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5860 if (e != JIM_OK) {
5861 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5862 return e;
5864 jim_wide a;
5865 e = jim_getopt_wide(&goi, &a);
5866 if (e != JIM_OK)
5867 return e;
5868 struct command_context *cmd_ctx = current_command_context(interp);
5869 assert(cmd_ctx);
5870 struct target *target = get_current_target(cmd_ctx);
5871 if (!target->tap->enabled)
5872 return jim_target_tap_disabled(interp);
5874 e = target_wait_state(target, n->value, a);
5875 if (e != ERROR_OK) {
5876 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5877 Jim_SetResultFormatted(goi.interp,
5878 "target: %s wait %s fails (%#s) %s",
5879 target_name(target), n->name,
5880 obj, target_strerror_safe(e));
5881 return JIM_ERR;
5883 return JIM_OK;
5885 /* Human-readable list of the events defined for this target.
5886 * Scripts/programs should use 'name cget -event NAME' instead.
5888 COMMAND_HANDLER(handle_target_event_list)
5890 struct target *target = get_current_target(CMD_CTX);
5891 struct target_event_action *teap = target->event_action;
5893 command_print(CMD, "Event actions for target (%d) %s\n",
5894 target->target_number,
5895 target_name(target));
5896 command_print(CMD, "%-25s | Body", "Event");
5897 command_print(CMD, "------------------------- | "
5898 "----------------------------------------");
5899 while (teap) {
5900 command_print(CMD, "%-25s | %s",
5901 target_event_name(teap->event),
5902 Jim_GetString(teap->body, NULL));
5903 teap = teap->next;
5905 command_print(CMD, "***END***");
5906 return ERROR_OK;
5909 COMMAND_HANDLER(handle_target_current_state)
5911 if (CMD_ARGC != 0)
5912 return ERROR_COMMAND_SYNTAX_ERROR;
5914 struct target *target = get_current_target(CMD_CTX);
5916 command_print(CMD, "%s", target_state_name(target));
5918 return ERROR_OK;
5921 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5923 struct jim_getopt_info goi;
5924 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5925 if (goi.argc != 1) {
5926 const char *cmd_name = Jim_GetString(argv[0], NULL);
5927 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5928 return JIM_ERR;
5930 struct jim_nvp *n;
5931 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5932 if (e != JIM_OK) {
5933 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5934 return e;
5936 struct command_context *cmd_ctx = current_command_context(interp);
5937 assert(cmd_ctx);
5938 struct target *target = get_current_target(cmd_ctx);
5939 target_handle_event(target, n->value);
5940 return JIM_OK;
5943 static const struct command_registration target_instance_command_handlers[] = {
5945 .name = "configure",
5946 .mode = COMMAND_ANY,
5947 .jim_handler = jim_target_configure,
5948 .help = "configure a new target for use",
5949 .usage = "[target_attribute ...]",
5952 .name = "cget",
5953 .mode = COMMAND_ANY,
5954 .jim_handler = jim_target_configure,
5955 .help = "returns the specified target attribute",
5956 .usage = "target_attribute",
5959 .name = "mwd",
5960 .handler = handle_mw_command,
5961 .mode = COMMAND_EXEC,
5962 .help = "Write 64-bit word(s) to target memory",
5963 .usage = "address data [count]",
5966 .name = "mww",
5967 .handler = handle_mw_command,
5968 .mode = COMMAND_EXEC,
5969 .help = "Write 32-bit word(s) to target memory",
5970 .usage = "address data [count]",
5973 .name = "mwh",
5974 .handler = handle_mw_command,
5975 .mode = COMMAND_EXEC,
5976 .help = "Write 16-bit half-word(s) to target memory",
5977 .usage = "address data [count]",
5980 .name = "mwb",
5981 .handler = handle_mw_command,
5982 .mode = COMMAND_EXEC,
5983 .help = "Write byte(s) to target memory",
5984 .usage = "address data [count]",
5987 .name = "mdd",
5988 .handler = handle_md_command,
5989 .mode = COMMAND_EXEC,
5990 .help = "Display target memory as 64-bit words",
5991 .usage = "address [count]",
5994 .name = "mdw",
5995 .handler = handle_md_command,
5996 .mode = COMMAND_EXEC,
5997 .help = "Display target memory as 32-bit words",
5998 .usage = "address [count]",
6001 .name = "mdh",
6002 .handler = handle_md_command,
6003 .mode = COMMAND_EXEC,
6004 .help = "Display target memory as 16-bit half-words",
6005 .usage = "address [count]",
6008 .name = "mdb",
6009 .handler = handle_md_command,
6010 .mode = COMMAND_EXEC,
6011 .help = "Display target memory as 8-bit bytes",
6012 .usage = "address [count]",
6015 .name = "array2mem",
6016 .mode = COMMAND_EXEC,
6017 .jim_handler = jim_target_array2mem,
6018 .help = "Writes Tcl array of 8/16/32 bit numbers "
6019 "to target memory",
6020 .usage = "arrayname bitwidth address count",
6023 .name = "mem2array",
6024 .mode = COMMAND_EXEC,
6025 .jim_handler = jim_target_mem2array,
6026 .help = "Loads Tcl array of 8/16/32 bit numbers "
6027 "from target memory",
6028 .usage = "arrayname bitwidth address count",
6031 .name = "get_reg",
6032 .mode = COMMAND_EXEC,
6033 .jim_handler = target_jim_get_reg,
6034 .help = "Get register values from the target",
6035 .usage = "list",
6038 .name = "set_reg",
6039 .mode = COMMAND_EXEC,
6040 .jim_handler = target_jim_set_reg,
6041 .help = "Set target register values",
6042 .usage = "dict",
6045 .name = "read_memory",
6046 .mode = COMMAND_EXEC,
6047 .handler = handle_target_read_memory,
6048 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6049 .usage = "address width count ['phys']",
6052 .name = "write_memory",
6053 .mode = COMMAND_EXEC,
6054 .jim_handler = target_jim_write_memory,
6055 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6056 .usage = "address width data ['phys']",
6059 .name = "eventlist",
6060 .handler = handle_target_event_list,
6061 .mode = COMMAND_EXEC,
6062 .help = "displays a table of events defined for this target",
6063 .usage = "",
6066 .name = "curstate",
6067 .mode = COMMAND_EXEC,
6068 .handler = handle_target_current_state,
6069 .help = "displays the current state of this target",
6070 .usage = "",
6073 .name = "arp_examine",
6074 .mode = COMMAND_EXEC,
6075 .jim_handler = jim_target_examine,
6076 .help = "used internally for reset processing",
6077 .usage = "['allow-defer']",
6080 .name = "was_examined",
6081 .mode = COMMAND_EXEC,
6082 .handler = handle_target_was_examined,
6083 .help = "used internally for reset processing",
6084 .usage = "",
6087 .name = "examine_deferred",
6088 .mode = COMMAND_EXEC,
6089 .handler = handle_target_examine_deferred,
6090 .help = "used internally for reset processing",
6091 .usage = "",
6094 .name = "arp_halt_gdb",
6095 .mode = COMMAND_EXEC,
6096 .handler = handle_target_halt_gdb,
6097 .help = "used internally for reset processing to halt GDB",
6098 .usage = "",
6101 .name = "arp_poll",
6102 .mode = COMMAND_EXEC,
6103 .jim_handler = jim_target_poll,
6104 .help = "used internally for reset processing",
6107 .name = "arp_reset",
6108 .mode = COMMAND_EXEC,
6109 .jim_handler = jim_target_reset,
6110 .help = "used internally for reset processing",
6113 .name = "arp_halt",
6114 .mode = COMMAND_EXEC,
6115 .jim_handler = jim_target_halt,
6116 .help = "used internally for reset processing",
6119 .name = "arp_waitstate",
6120 .mode = COMMAND_EXEC,
6121 .jim_handler = jim_target_wait_state,
6122 .help = "used internally for reset processing",
6125 .name = "invoke-event",
6126 .mode = COMMAND_EXEC,
6127 .jim_handler = jim_target_invoke_event,
6128 .help = "invoke handler for specified event",
6129 .usage = "event_name",
6131 COMMAND_REGISTRATION_DONE
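/*
 * target_create() backs "target create <name> <type> [options ...]".
 * A minimal, illustrative configuration sequence (the tap and target
 * names below are examples, not taken from this file) could look like:
 *
 *   jtag newtap mychip cpu -irlen 4
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 *
 * Everything after <name> and <type> is handed to target_configure()
 * exactly as if it had been passed to "$target_name configure".
 */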
6134 static int target_create(struct jim_getopt_info *goi)
6136 Jim_Obj *new_cmd;
6137 Jim_Cmd *cmd;
6138 const char *cp;
6139 int e;
6140 int x;
6141 struct target *target;
6142 struct command_context *cmd_ctx;
6144 cmd_ctx = current_command_context(goi->interp);
6145 assert(cmd_ctx);
6147 if (goi->argc < 3) {
6148 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6149 return JIM_ERR;
6152 /* COMMAND */
6153 jim_getopt_obj(goi, &new_cmd);
6154 /* does this command exist? */
6155 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6156 if (cmd) {
6157 cp = Jim_GetString(new_cmd, NULL);
6158 Jim_SetResultFormatted(goi->interp, "command/target '%s' already exists", cp);
6159 return JIM_ERR;
6162 /* TYPE */
6163 e = jim_getopt_string(goi, &cp, NULL);
6164 if (e != JIM_OK)
6165 return e;
6166 struct transport *tr = get_current_transport();
6167 if (tr && tr->override_target) {
6168 e = tr->override_target(&cp);
6169 if (e != ERROR_OK) {
6170 LOG_ERROR("The selected transport doesn't support this target");
6171 return JIM_ERR;
6173 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6175 /* now does target type exist */
6176 for (x = 0 ; target_types[x] ; x++) {
6177 if (strcmp(cp, target_types[x]->name) == 0) {
6178 /* found */
6179 break;
6182 if (!target_types[x]) {
6183 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6184 for (x = 0 ; target_types[x] ; x++) {
6185 if (target_types[x + 1]) {
6186 Jim_AppendStrings(goi->interp,
6187 Jim_GetResult(goi->interp),
6188 target_types[x]->name,
6189 ", ", NULL);
6190 } else {
6191 Jim_AppendStrings(goi->interp,
6192 Jim_GetResult(goi->interp),
6193 " or ",
6194 target_types[x]->name, NULL);
6197 return JIM_ERR;
6200 /* Create it */
6201 target = calloc(1, sizeof(struct target));
6202 if (!target) {
6203 LOG_ERROR("Out of memory");
6204 return JIM_ERR;
6207 /* set empty smp cluster */
6208 target->smp_targets = &empty_smp_targets;
6210 /* set target number */
6211 target->target_number = new_target_number();
6213 /* allocate memory for each unique target type */
6214 target->type = malloc(sizeof(struct target_type));
6215 if (!target->type) {
6216 LOG_ERROR("Out of memory");
6217 free(target);
6218 return JIM_ERR;
6221 memcpy(target->type, target_types[x], sizeof(struct target_type));
6223 /* default to first core, override with -coreid */
6224 target->coreid = 0;
6226 target->working_area = 0x0;
6227 target->working_area_size = 0x0;
6228 target->working_areas = NULL;
6229 target->backup_working_area = 0;
6231 target->state = TARGET_UNKNOWN;
6232 target->debug_reason = DBG_REASON_UNDEFINED;
6233 target->reg_cache = NULL;
6234 target->breakpoints = NULL;
6235 target->watchpoints = NULL;
6236 target->next = NULL;
6237 target->arch_info = NULL;
6239 target->verbose_halt_msg = true;
6241 target->halt_issued = false;
6243 /* initialize trace information */
6244 target->trace_info = calloc(1, sizeof(struct trace));
6245 if (!target->trace_info) {
6246 LOG_ERROR("Out of memory");
6247 free(target->type);
6248 free(target);
6249 return JIM_ERR;
6252 target->dbgmsg = NULL;
6253 target->dbg_msg_enabled = 0;
6255 target->endianness = TARGET_ENDIAN_UNKNOWN;
6257 target->rtos = NULL;
6258 target->rtos_auto_detect = false;
6260 target->gdb_port_override = NULL;
6261 target->gdb_max_connections = 1;
6263 /* Do the rest as "configure" options */
6264 goi->isconfigure = 1;
6265 e = target_configure(goi, target);
6267 if (e == JIM_OK) {
6268 if (target->has_dap) {
6269 if (!target->dap_configured) {
6270 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6271 e = JIM_ERR;
6273 } else {
6274 if (!target->tap_configured) {
6275 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6276 e = JIM_ERR;
6279 /* tap must be set after target was configured */
6280 if (!target->tap)
6281 e = JIM_ERR;
6284 if (e != JIM_OK) {
6285 rtos_destroy(target);
6286 free(target->gdb_port_override);
6287 free(target->trace_info);
6288 free(target->type);
6289 free(target);
6290 return e;
6293 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6294 /* default endian to little if not specified */
6295 target->endianness = TARGET_LITTLE_ENDIAN;
6298 cp = Jim_GetString(new_cmd, NULL);
6299 target->cmd_name = strdup(cp);
6300 if (!target->cmd_name) {
6301 LOG_ERROR("Out of memory");
6302 rtos_destroy(target);
6303 free(target->gdb_port_override);
6304 free(target->trace_info);
6305 free(target->type);
6306 free(target);
6307 return JIM_ERR;
6310 if (target->type->target_create) {
6311 e = (*(target->type->target_create))(target, goi->interp);
6312 if (e != ERROR_OK) {
6313 LOG_DEBUG("target_create failed");
6314 free(target->cmd_name);
6315 rtos_destroy(target);
6316 free(target->gdb_port_override);
6317 free(target->trace_info);
6318 free(target->type);
6319 free(target);
6320 return JIM_ERR;
6324 /* create the target specific commands */
6325 if (target->type->commands) {
6326 e = register_commands(cmd_ctx, NULL, target->type->commands);
6327 if (e != ERROR_OK)
6328 LOG_ERROR("unable to register '%s' commands", cp);
6331 /* now - create the new target name command */
6332 const struct command_registration target_subcommands[] = {
6334 .chain = target_instance_command_handlers,
6337 .chain = target->type->commands,
6339 COMMAND_REGISTRATION_DONE
6341 const struct command_registration target_commands[] = {
6343 .name = cp,
6344 .mode = COMMAND_ANY,
6345 .help = "target command group",
6346 .usage = "",
6347 .chain = target_subcommands,
6349 COMMAND_REGISTRATION_DONE
6351 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6352 if (e != ERROR_OK) {
6353 if (target->type->deinit_target)
6354 target->type->deinit_target(target);
6355 free(target->cmd_name);
6356 rtos_destroy(target);
6357 free(target->gdb_port_override);
6358 free(target->trace_info);
6359 free(target->type);
6360 free(target);
6361 return JIM_ERR;
6364 /* append to end of list */
6365 append_to_list_all_targets(target);
6367 cmd_ctx->current_target = target;
6368 return JIM_OK;
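/*
 * The next three handlers implement the introspection subcommands
 * "target current", "target types" and "target names"; they only read
 * global state and print it, so they are usable in any command mode.
 */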
6371 COMMAND_HANDLER(handle_target_current)
6373 if (CMD_ARGC != 0)
6374 return ERROR_COMMAND_SYNTAX_ERROR;
6376 struct target *target = get_current_target_or_null(CMD_CTX);
6377 if (target)
6378 command_print(CMD, "%s", target_name(target));
6380 return ERROR_OK;
6383 COMMAND_HANDLER(handle_target_types)
6385 if (CMD_ARGC != 0)
6386 return ERROR_COMMAND_SYNTAX_ERROR;
6388 for (unsigned int x = 0; target_types[x]; x++)
6389 command_print(CMD, "%s", target_types[x]->name);
6391 return ERROR_OK;
6394 COMMAND_HANDLER(handle_target_names)
6396 if (CMD_ARGC != 0)
6397 return ERROR_COMMAND_SYNTAX_ERROR;
6399 struct target *target = all_targets;
6400 while (target) {
6401 command_print(CMD, "%s", target_name(target));
6402 target = target->next;
6405 return ERROR_OK;
6408 static struct target_list *
6409 __attribute__((warn_unused_result))
6410 create_target_list_node(const char *targetname)
6412 struct target *target = get_target(targetname);
6413 LOG_DEBUG("%s ", targetname);
6414 if (!target)
6415 return NULL;
6417 struct target_list *new = malloc(sizeof(struct target_list));
6418 if (!new) {
6419 LOG_ERROR("Out of memory");
6420 return new;
6423 new->target = target;
6424 return new;
6427 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6428 struct list_head *lh, struct target **result)
6430 struct target *target = NULL;
6431 struct target_list *curr;
6432 foreach_smp_target(curr, lh) {
6433 struct rtos *curr_rtos = curr->target->rtos;
6434 if (curr_rtos) {
6435 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6436 command_print(cmd, "Different rtos types in members of one smp target!");
6437 return ERROR_FAIL;
6439 target = curr->target;
6442 *result = target;
6443 return ERROR_OK;
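/*
 * "target smp" groups the listed targets into one SMP cluster, e.g.
 * (target names are illustrative):
 *
 *   target smp chip.cpu0 chip.cpu1
 *
 * Every member gets the same non-zero smp group id and a pointer to the
 * shared target_list; if the members share a common RTOS type,
 * rtos_smp_init() is called on one of them to enable SMP-aware RTOS
 * support.
 */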
6446 COMMAND_HANDLER(handle_target_smp)
6448 static int smp_group = 1;
6450 if (CMD_ARGC == 0) {
6451 LOG_DEBUG("Empty SMP target");
6452 return ERROR_OK;
6454 LOG_DEBUG("%u", CMD_ARGC);
6455 /* CMD_ARGV[0] = target to associate in smp
6456 * CMD_ARGV[1] = target to associate in smp
6457 * CMD_ARGV[2] ...
6460 struct list_head *lh = malloc(sizeof(*lh));
6461 if (!lh) {
6462 LOG_ERROR("Out of memory");
6463 return ERROR_FAIL;
6465 INIT_LIST_HEAD(lh);
6467 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6468 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6469 if (new)
6470 list_add_tail(&new->lh, lh);
6472 /* now parse the list of cpu and put the target in smp mode*/
6473 struct target_list *curr;
6474 foreach_smp_target(curr, lh) {
6475 struct target *target = curr->target;
6476 target->smp = smp_group;
6477 target->smp_targets = lh;
6479 smp_group++;
6481 struct target *rtos_target;
6482 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6483 if (retval == ERROR_OK && rtos_target)
6484 retval = rtos_smp_init(rtos_target);
6486 return retval;
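/*
 * jim_target_create() is the thin Jim-level wrapper registered as
 * "target create": it only checks the argument count and then defers
 * all real work to target_create() above.
 */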
6489 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6491 struct jim_getopt_info goi;
6492 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6493 if (goi.argc < 3) {
6494 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6495 "<name> <target_type> [<target_options> ...]");
6496 return JIM_ERR;
6498 return target_create(&goi);
6501 static const struct command_registration target_subcommand_handlers[] = {
6503 .name = "init",
6504 .mode = COMMAND_CONFIG,
6505 .handler = handle_target_init_command,
6506 .help = "initialize targets",
6507 .usage = "",
6510 .name = "create",
6511 .mode = COMMAND_CONFIG,
6512 .jim_handler = jim_target_create,
6513 .usage = "name type '-chain-position' name [options ...]",
6514 .help = "Creates and selects a new target",
6517 .name = "current",
6518 .mode = COMMAND_ANY,
6519 .handler = handle_target_current,
6520 .help = "Returns the currently selected target",
6521 .usage = "",
6524 .name = "types",
6525 .mode = COMMAND_ANY,
6526 .handler = handle_target_types,
6527 .help = "Returns the available target types as "
6528 "a list of strings",
6529 .usage = "",
6532 .name = "names",
6533 .mode = COMMAND_ANY,
6534 .handler = handle_target_names,
6535 .help = "Returns the names of all targets as a list of strings",
6536 .usage = "",
6539 .name = "smp",
6540 .mode = COMMAND_ANY,
6541 .handler = handle_target_smp,
6542 .usage = "targetname1 targetname2 ...",
6543 .help = "gather several target in a smp list"
6546 COMMAND_REGISTRATION_DONE
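/*
 * Fastload support: "fast_load_image" stages an image in host (server)
 * memory as a list of (address, data, length) chunks, and "fast_load"
 * later replays those chunks into the current target. This splits file
 * parsing from target I/O, which is mainly useful for profiling the
 * download path.
 */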
6549 struct fast_load {
6550 target_addr_t address;
6551 uint8_t *data;
6552 int length;
6556 static int fastload_num;
6557 static struct fast_load *fastload;
6559 static void free_fastload(void)
6561 if (fastload) {
6562 for (int i = 0; i < fastload_num; i++)
6563 free(fastload[i].data);
6564 free(fastload);
6565 fastload = NULL;
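/*
 * handle_fast_load_image_command() parses the same arguments as
 * "load_image", reads every image section, clips it against the
 * optional min/max address window and stores the result in the
 * fastload[] array. A typical (illustrative) session would be:
 *
 *   fast_load_image firmware.elf 0x0 elf
 *   ... reset/halt the target ...
 *   fast_load
 */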
6569 COMMAND_HANDLER(handle_fast_load_image_command)
6571 uint8_t *buffer;
6572 size_t buf_cnt;
6573 uint32_t image_size;
6574 target_addr_t min_address = 0;
6575 target_addr_t max_address = -1;
6577 struct image image;
6579 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6580 &image, &min_address, &max_address);
6581 if (retval != ERROR_OK)
6582 return retval;
6584 struct duration bench;
6585 duration_start(&bench);
6587 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6588 if (retval != ERROR_OK)
6589 return retval;
6591 image_size = 0x0;
6592 retval = ERROR_OK;
6593 fastload_num = image.num_sections;
6594 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6595 if (!fastload) {
6596 command_print(CMD, "out of memory");
6597 image_close(&image);
6598 return ERROR_FAIL;
6600 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6601 for (unsigned int i = 0; i < image.num_sections; i++) {
6602 buffer = malloc(image.sections[i].size);
6603 if (!buffer) {
6604 command_print(CMD, "error allocating buffer for section (%d bytes)",
6605 (int)(image.sections[i].size));
6606 retval = ERROR_FAIL;
6607 break;
6610 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6611 if (retval != ERROR_OK) {
6612 free(buffer);
6613 break;
6616 uint32_t offset = 0;
6617 uint32_t length = buf_cnt;
6619 /* DANGER!!! beware of unsigned comparison here!!! */
6621 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6622 (image.sections[i].base_address < max_address)) {
6623 if (image.sections[i].base_address < min_address) {
6624 /* clip addresses below */
6625 offset += min_address-image.sections[i].base_address;
6626 length -= offset;
6629 if (image.sections[i].base_address + buf_cnt > max_address)
6630 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6632 fastload[i].address = image.sections[i].base_address + offset;
6633 fastload[i].data = malloc(length);
6634 if (!fastload[i].data) {
6635 free(buffer);
6636 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6637 length);
6638 retval = ERROR_FAIL;
6639 break;
6641 memcpy(fastload[i].data, buffer + offset, length);
6642 fastload[i].length = length;
6644 image_size += length;
6645 command_print(CMD, "%u bytes written at address 0x%8.8x",
6646 (unsigned int)length,
6647 ((unsigned int)(image.sections[i].base_address + offset)));
6650 free(buffer);
6653 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6654 command_print(CMD, "Loaded %" PRIu32 " bytes "
6655 "in %fs (%0.3f KiB/s)", image_size,
6656 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6658 command_print(CMD,
6659 "WARNING: image has not been loaded to target!"
6660 "You can issue a 'fast_load' to finish loading.");
6663 image_close(&image);
6665 if (retval != ERROR_OK)
6666 free_fastload();
6668 return retval;
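/*
 * handle_fast_load_command() replays the previously staged sections
 * into the current target with target_write_buffer() and reports the
 * achieved throughput; it fails if no "fast_load_image" was run first.
 */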
6671 COMMAND_HANDLER(handle_fast_load_command)
6673 if (CMD_ARGC > 0)
6674 return ERROR_COMMAND_SYNTAX_ERROR;
6675 if (!fastload) {
6676 LOG_ERROR("No image in memory");
6677 return ERROR_FAIL;
6679 int i;
6680 int64_t ms = timeval_ms();
6681 int size = 0;
6682 int retval = ERROR_OK;
6683 for (i = 0; i < fastload_num; i++) {
6684 struct target *target = get_current_target(CMD_CTX);
6685 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6686 (unsigned int)(fastload[i].address),
6687 (unsigned int)(fastload[i].length));
6688 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6689 if (retval != ERROR_OK)
6690 break;
6691 size += fastload[i].length;
6693 if (retval == ERROR_OK) {
6694 int64_t after = timeval_ms();
6695 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6697 return retval;
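/*
 * Top-level registration: "targets" (list or switch the current
 * target) is available in any command mode, while the "target" command
 * group wrapping target_subcommand_handlers is config-mode only.
 */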
6700 static const struct command_registration target_command_handlers[] = {
6702 .name = "targets",
6703 .handler = handle_targets_command,
6704 .mode = COMMAND_ANY,
6705 .help = "change current default target (one parameter) "
6706 "or prints table of all targets (no parameters)",
6707 .usage = "[target]",
6710 .name = "target",
6711 .mode = COMMAND_CONFIG,
6712 .help = "configure target",
6713 .chain = target_subcommand_handlers,
6714 .usage = "",
6716 COMMAND_REGISTRATION_DONE
6719 int target_register_commands(struct command_context *cmd_ctx)
6721 return register_commands(cmd_ctx, NULL, target_command_handlers);
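/*
 * "reset_nag" controls whether OpenOCD prints a reminder after each
 * reset about options that could speed things up; the flag is stored
 * here and queried through get_target_reset_nag().
 */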
6724 static bool target_reset_nag = true;
6726 bool get_target_reset_nag(void)
6728 return target_reset_nag;
6731 COMMAND_HANDLER(handle_target_reset_nag)
6733 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6734 &target_reset_nag, "Nag after each reset about options to improve "
6735 "performance");
6738 COMMAND_HANDLER(handle_ps_command)
6740 struct target *target = get_current_target(CMD_CTX);
6741 char *display;
6742 if (target->state != TARGET_HALTED) {
6743 LOG_INFO("target not halted !!");
6744 return ERROR_OK;
6747 if ((target->rtos) && (target->rtos->type)
6748 && (target->rtos->type->ps_command)) {
6749 display = target->rtos->type->ps_command(target);
6750 command_print(CMD, "%s", display);
6751 free(display);
6752 return ERROR_OK;
6753 } else {
6754 LOG_INFO("failed");
6755 return ERROR_TARGET_FAILURE;
6759 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6761 if (text)
6762 command_print_sameline(cmd, "%s", text);
6763 for (int i = 0; i < size; i++)
6764 command_print_sameline(cmd, " %02x", buf[i]);
6765 command_print(cmd, " ");
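/*
 * "test_mem_access <size>" stress-tests target_read_memory() and
 * target_write_memory(): it fills a working area with a pseudo-random
 * pattern and then tries every combination of access width (1/2/4
 * bytes), target address offset (0..3) and host buffer alignment,
 * comparing the result against a host-side replay of the same
 * operation. Example (the size, in bytes, is illustrative):
 *
 *   test_mem_access 0x1000
 */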
6768 COMMAND_HANDLER(handle_test_mem_access_command)
6770 struct target *target = get_current_target(CMD_CTX);
6771 uint32_t test_size;
6772 int retval = ERROR_OK;
6774 if (target->state != TARGET_HALTED) {
6775 LOG_INFO("target not halted !!");
6776 return ERROR_FAIL;
6779 if (CMD_ARGC != 1)
6780 return ERROR_COMMAND_SYNTAX_ERROR;
6782 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6784 /* Test reads */
6785 size_t num_bytes = test_size + 4;
6787 struct working_area *wa = NULL;
6788 retval = target_alloc_working_area(target, num_bytes, &wa);
6789 if (retval != ERROR_OK) {
6790 LOG_ERROR("Not enough working area");
6791 return ERROR_FAIL;
6794 uint8_t *test_pattern = malloc(num_bytes);
6796 for (size_t i = 0; i < num_bytes; i++)
6797 test_pattern[i] = rand();
6799 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6800 if (retval != ERROR_OK) {
6801 LOG_ERROR("Test pattern write failed");
6802 goto out;
6805 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6806 for (int size = 1; size <= 4; size *= 2) {
6807 for (int offset = 0; offset < 4; offset++) {
6808 uint32_t count = test_size / size;
6809 size_t host_bufsiz = (count + 2) * size + host_offset;
6810 uint8_t *read_ref = malloc(host_bufsiz);
6811 uint8_t *read_buf = malloc(host_bufsiz);
6813 for (size_t i = 0; i < host_bufsiz; i++) {
6814 read_ref[i] = rand();
6815 read_buf[i] = read_ref[i];
6817 command_print_sameline(CMD,
6818 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6819 size, offset, host_offset ? "un" : "");
6821 struct duration bench;
6822 duration_start(&bench);
6824 retval = target_read_memory(target, wa->address + offset, size, count,
6825 read_buf + size + host_offset);
6827 duration_measure(&bench);
6829 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6830 command_print(CMD, "Unsupported alignment");
6831 goto next;
6832 } else if (retval != ERROR_OK) {
6833 command_print(CMD, "Memory read failed");
6834 goto next;
6837 /* replay on host */
6838 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6840 /* check result */
6841 int result = memcmp(read_ref, read_buf, host_bufsiz);
6842 if (result == 0) {
6843 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6844 duration_elapsed(&bench),
6845 duration_kbps(&bench, count * size));
6846 } else {
6847 command_print(CMD, "Compare failed");
6848 binprint(CMD, "ref:", read_ref, host_bufsiz);
6849 binprint(CMD, "buf:", read_buf, host_bufsiz);
6851 next:
6852 free(read_ref);
6853 free(read_buf);
6858 out:
6859 free(test_pattern);
6861 target_free_working_area(target, wa);
6863 /* Test writes */
6864 num_bytes = test_size + 4 + 4 + 4;
6866 retval = target_alloc_working_area(target, num_bytes, &wa);
6867 if (retval != ERROR_OK) {
6868 LOG_ERROR("Not enough working area");
6869 return ERROR_FAIL;
6872 test_pattern = malloc(num_bytes);
6874 for (size_t i = 0; i < num_bytes; i++)
6875 test_pattern[i] = rand();
6877 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6878 for (int size = 1; size <= 4; size *= 2) {
6879 for (int offset = 0; offset < 4; offset++) {
6880 uint32_t count = test_size / size;
6881 size_t host_bufsiz = count * size + host_offset;
6882 uint8_t *read_ref = malloc(num_bytes);
6883 uint8_t *read_buf = malloc(num_bytes);
6884 uint8_t *write_buf = malloc(host_bufsiz);
6886 for (size_t i = 0; i < host_bufsiz; i++)
6887 write_buf[i] = rand();
6888 command_print_sameline(CMD,
6889 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6890 size, offset, host_offset ? "un" : "");
6892 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6893 if (retval != ERROR_OK) {
6894 command_print(CMD, "Test pattern write failed");
6895 goto nextw;
6898 /* replay on host */
6899 memcpy(read_ref, test_pattern, num_bytes);
6900 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6902 struct duration bench;
6903 duration_start(&bench);
6905 retval = target_write_memory(target, wa->address + size + offset, size, count,
6906 write_buf + host_offset);
6908 duration_measure(&bench);
6910 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6911 command_print(CMD, "Unsupported alignment");
6912 goto nextw;
6913 } else if (retval != ERROR_OK) {
6914 command_print(CMD, "Memory write failed");
6915 goto nextw;
6918 /* read back */
6919 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6920 if (retval != ERROR_OK) {
6921 command_print(CMD, "Test pattern write failed");
6922 goto nextw;
6925 /* check result */
6926 int result = memcmp(read_ref, read_buf, num_bytes);
6927 if (result == 0) {
6928 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6929 duration_elapsed(&bench),
6930 duration_kbps(&bench, count * size));
6931 } else {
6932 command_print(CMD, "Compare failed");
6933 binprint(CMD, "ref:", read_ref, num_bytes);
6934 binprint(CMD, "buf:", read_buf, num_bytes);
6936 nextw:
6937 free(read_ref);
6938 free(read_buf);
6943 free(test_pattern);
6945 target_free_working_area(target, wa);
6946 return retval;
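/*
 * target_exec_command_handlers below collects the user-facing run-time
 * commands: image download/verify, breakpoints and watchpoints, raw
 * memory access, register access and the assorted test helpers above.
 */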
6949 static const struct command_registration target_exec_command_handlers[] = {
6951 .name = "fast_load_image",
6952 .handler = handle_fast_load_image_command,
6953 .mode = COMMAND_ANY,
6954 .help = "Load image into server memory for later use by "
6955 "fast_load; primarily for profiling",
6956 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6957 "[min_address [max_length]]",
6960 .name = "fast_load",
6961 .handler = handle_fast_load_command,
6962 .mode = COMMAND_EXEC,
6963 .help = "loads active fast load image to current target "
6964 "- mainly for profiling purposes",
6965 .usage = "",
6968 .name = "profile",
6969 .handler = handle_profile_command,
6970 .mode = COMMAND_EXEC,
6971 .usage = "seconds filename [start end]",
6972 .help = "profiling samples the CPU PC",
6974 /** @todo don't register virt2phys() unless target supports it */
6976 .name = "virt2phys",
6977 .handler = handle_virt2phys_command,
6978 .mode = COMMAND_ANY,
6979 .help = "translate a virtual address into a physical address",
6980 .usage = "virtual_address",
6983 .name = "reg",
6984 .handler = handle_reg_command,
6985 .mode = COMMAND_EXEC,
6986 .help = "display (reread from target with \"force\") or set a register; "
6987 "with no arguments, displays all registers and their values",
6988 .usage = "[(register_number|register_name) [(value|'force')]]",
6991 .name = "poll",
6992 .handler = handle_poll_command,
6993 .mode = COMMAND_EXEC,
6994 .help = "poll target state; or reconfigure background polling",
6995 .usage = "['on'|'off']",
6998 .name = "wait_halt",
6999 .handler = handle_wait_halt_command,
7000 .mode = COMMAND_EXEC,
7001 .help = "wait up to the specified number of milliseconds "
7002 "(default 5000) for a previously requested halt",
7003 .usage = "[milliseconds]",
7006 .name = "halt",
7007 .handler = handle_halt_command,
7008 .mode = COMMAND_EXEC,
7009 .help = "request target to halt, then wait up to the specified "
7010 "number of milliseconds (default 5000) for it to complete",
7011 .usage = "[milliseconds]",
7014 .name = "resume",
7015 .handler = handle_resume_command,
7016 .mode = COMMAND_EXEC,
7017 .help = "resume target execution from current PC or address",
7018 .usage = "[address]",
7021 .name = "reset",
7022 .handler = handle_reset_command,
7023 .mode = COMMAND_EXEC,
7024 .usage = "[run|halt|init]",
7025 .help = "Reset all targets into the specified mode. "
7026 "Default reset mode is run, if not given.",
7029 .name = "soft_reset_halt",
7030 .handler = handle_soft_reset_halt_command,
7031 .mode = COMMAND_EXEC,
7032 .usage = "",
7033 .help = "halt the target and do a soft reset",
7036 .name = "step",
7037 .handler = handle_step_command,
7038 .mode = COMMAND_EXEC,
7039 .help = "step one instruction from current PC or address",
7040 .usage = "[address]",
7043 .name = "mdd",
7044 .handler = handle_md_command,
7045 .mode = COMMAND_EXEC,
7046 .help = "display memory double-words",
7047 .usage = "['phys'] address [count]",
7050 .name = "mdw",
7051 .handler = handle_md_command,
7052 .mode = COMMAND_EXEC,
7053 .help = "display memory words",
7054 .usage = "['phys'] address [count]",
7057 .name = "mdh",
7058 .handler = handle_md_command,
7059 .mode = COMMAND_EXEC,
7060 .help = "display memory half-words",
7061 .usage = "['phys'] address [count]",
7064 .name = "mdb",
7065 .handler = handle_md_command,
7066 .mode = COMMAND_EXEC,
7067 .help = "display memory bytes",
7068 .usage = "['phys'] address [count]",
7071 .name = "mwd",
7072 .handler = handle_mw_command,
7073 .mode = COMMAND_EXEC,
7074 .help = "write memory double-word",
7075 .usage = "['phys'] address value [count]",
7078 .name = "mww",
7079 .handler = handle_mw_command,
7080 .mode = COMMAND_EXEC,
7081 .help = "write memory word",
7082 .usage = "['phys'] address value [count]",
7085 .name = "mwh",
7086 .handler = handle_mw_command,
7087 .mode = COMMAND_EXEC,
7088 .help = "write memory half-word",
7089 .usage = "['phys'] address value [count]",
7092 .name = "mwb",
7093 .handler = handle_mw_command,
7094 .mode = COMMAND_EXEC,
7095 .help = "write memory byte",
7096 .usage = "['phys'] address value [count]",
7099 .name = "bp",
7100 .handler = handle_bp_command,
7101 .mode = COMMAND_EXEC,
7102 .help = "list or set hardware or software breakpoint",
7103 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7106 .name = "rbp",
7107 .handler = handle_rbp_command,
7108 .mode = COMMAND_EXEC,
7109 .help = "remove breakpoint",
7110 .usage = "'all' | address",
7113 .name = "wp",
7114 .handler = handle_wp_command,
7115 .mode = COMMAND_EXEC,
7116 .help = "list (no params) or create watchpoints",
7117 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7120 .name = "rwp",
7121 .handler = handle_rwp_command,
7122 .mode = COMMAND_EXEC,
7123 .help = "remove watchpoint",
7124 .usage = "address",
7127 .name = "load_image",
7128 .handler = handle_load_image_command,
7129 .mode = COMMAND_EXEC,
7130 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7131 "[min_address] [max_length]",
7134 .name = "dump_image",
7135 .handler = handle_dump_image_command,
7136 .mode = COMMAND_EXEC,
7137 .usage = "filename address size",
7140 .name = "verify_image_checksum",
7141 .handler = handle_verify_image_checksum_command,
7142 .mode = COMMAND_EXEC,
7143 .usage = "filename [offset [type]]",
7146 .name = "verify_image",
7147 .handler = handle_verify_image_command,
7148 .mode = COMMAND_EXEC,
7149 .usage = "filename [offset [type]]",
7152 .name = "test_image",
7153 .handler = handle_test_image_command,
7154 .mode = COMMAND_EXEC,
7155 .usage = "filename [offset [type]]",
7158 .name = "get_reg",
7159 .mode = COMMAND_EXEC,
7160 .jim_handler = target_jim_get_reg,
7161 .help = "Get register values from the target",
7162 .usage = "list",
7165 .name = "set_reg",
7166 .mode = COMMAND_EXEC,
7167 .jim_handler = target_jim_set_reg,
7168 .help = "Set target register values",
7169 .usage = "dict",
7172 .name = "read_memory",
7173 .mode = COMMAND_EXEC,
7174 .handler = handle_target_read_memory,
7175 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7176 .usage = "address width count ['phys']",
7179 .name = "write_memory",
7180 .mode = COMMAND_EXEC,
7181 .jim_handler = target_jim_write_memory,
7182 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7183 .usage = "address width data ['phys']",
7186 .name = "reset_nag",
7187 .handler = handle_target_reset_nag,
7188 .mode = COMMAND_ANY,
7189 .help = "Nag after each reset about options that could have been "
7190 "enabled to improve performance.",
7191 .usage = "['enable'|'disable']",
7194 .name = "ps",
7195 .handler = handle_ps_command,
7196 .mode = COMMAND_EXEC,
7197 .help = "list all tasks",
7198 .usage = "",
7201 .name = "test_mem_access",
7202 .handler = handle_test_mem_access_command,
7203 .mode = COMMAND_EXEC,
7204 .help = "Test the target's memory access functions",
7205 .usage = "size",
7208 COMMAND_REGISTRATION_DONE
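/*
 * target_register_user_commands() is the second registration stage: it
 * first registers the target_request and trace command groups and then
 * the exec-mode handlers above; any failure is propagated to the caller.
 */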
7210 static int target_register_user_commands(struct command_context *cmd_ctx)
7212 int retval = ERROR_OK;
7213 retval = target_request_register_commands(cmd_ctx);
7214 if (retval != ERROR_OK)
7215 return retval;
7217 retval = trace_register_commands(cmd_ctx);
7218 if (retval != ERROR_OK)
7219 return retval;
7222 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);