target: rewrite command 'arp_halt_gdb' as COMMAND_HANDLER
[openocd.git] / src / target / target.c
blob4e29146482a4ca23a9fedc710bed8ceef9f8805c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
51 /* default halt wait timeout (ms) */
52 #define DEFAULT_HALT_TIMEOUT 5000
54 static int target_read_buffer_default(struct target *target, target_addr_t address,
55 uint32_t count, uint8_t *buffer);
56 static int target_write_buffer_default(struct target *target, target_addr_t address,
57 uint32_t count, const uint8_t *buffer);
58 static int target_array2mem(Jim_Interp *interp, struct target *target,
59 int argc, Jim_Obj * const *argv);
60 static int target_mem2array(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_register_user_commands(struct command_context *cmd_ctx);
63 static int target_get_gdb_fileio_info_default(struct target *target,
64 struct gdb_fileio_info *fileio_info);
65 static int target_gdb_fileio_end_default(struct target *target, int retcode,
66 int fileio_errno, bool ctrl_c);
68 /* targets */
69 extern struct target_type arm7tdmi_target;
70 extern struct target_type arm720t_target;
71 extern struct target_type arm9tdmi_target;
72 extern struct target_type arm920t_target;
73 extern struct target_type arm966e_target;
74 extern struct target_type arm946e_target;
75 extern struct target_type arm926ejs_target;
76 extern struct target_type fa526_target;
77 extern struct target_type feroceon_target;
78 extern struct target_type dragonite_target;
79 extern struct target_type xscale_target;
80 extern struct target_type xtensa_chip_target;
81 extern struct target_type cortexm_target;
82 extern struct target_type cortexa_target;
83 extern struct target_type aarch64_target;
84 extern struct target_type cortexr4_target;
85 extern struct target_type arm11_target;
86 extern struct target_type ls1_sap_target;
87 extern struct target_type mips_m4k_target;
88 extern struct target_type mips_mips64_target;
89 extern struct target_type avr_target;
90 extern struct target_type dsp563xx_target;
91 extern struct target_type dsp5680xx_target;
92 extern struct target_type testee_target;
93 extern struct target_type avr32_ap7k_target;
94 extern struct target_type hla_target;
95 extern struct target_type esp32_target;
96 extern struct target_type esp32s2_target;
97 extern struct target_type esp32s3_target;
98 extern struct target_type or1k_target;
99 extern struct target_type quark_x10xx_target;
100 extern struct target_type quark_d20xx_target;
101 extern struct target_type stm8_target;
102 extern struct target_type riscv_target;
103 extern struct target_type mem_ap_target;
104 extern struct target_type esirisc_target;
105 extern struct target_type arcv2_target;
107 static struct target_type *target_types[] = {
108 &arm7tdmi_target,
109 &arm9tdmi_target,
110 &arm920t_target,
111 &arm720t_target,
112 &arm966e_target,
113 &arm946e_target,
114 &arm926ejs_target,
115 &fa526_target,
116 &feroceon_target,
117 &dragonite_target,
118 &xscale_target,
119 &xtensa_chip_target,
120 &cortexm_target,
121 &cortexa_target,
122 &cortexr4_target,
123 &arm11_target,
124 &ls1_sap_target,
125 &mips_m4k_target,
126 &avr_target,
127 &dsp563xx_target,
128 &dsp5680xx_target,
129 &testee_target,
130 &avr32_ap7k_target,
131 &hla_target,
132 &esp32_target,
133 &esp32s2_target,
134 &esp32s3_target,
135 &or1k_target,
136 &quark_x10xx_target,
137 &quark_d20xx_target,
138 &stm8_target,
139 &riscv_target,
140 &mem_ap_target,
141 &esirisc_target,
142 &arcv2_target,
143 &aarch64_target,
144 &mips_mips64_target,
145 NULL,
148 struct target *all_targets;
149 static struct target_event_callback *target_event_callbacks;
150 static struct target_timer_callback *target_timer_callbacks;
151 static int64_t target_timer_next_event_value;
152 static LIST_HEAD(target_reset_callback_list);
153 static LIST_HEAD(target_trace_callback_list);
154 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
155 static LIST_HEAD(empty_smp_targets);
157 static const struct jim_nvp nvp_assert[] = {
158 { .name = "assert", NVP_ASSERT },
159 { .name = "deassert", NVP_DEASSERT },
160 { .name = "T", NVP_ASSERT },
161 { .name = "F", NVP_DEASSERT },
162 { .name = "t", NVP_ASSERT },
163 { .name = "f", NVP_DEASSERT },
164 { .name = NULL, .value = -1 }
167 static const struct jim_nvp nvp_error_target[] = {
168 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
169 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
170 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
171 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
172 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
173 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
174 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
175 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
176 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
177 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
178 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
179 { .value = -1, .name = NULL }
182 static const char *target_strerror_safe(int err)
184 const struct jim_nvp *n;
186 n = jim_nvp_value2name_simple(nvp_error_target, err);
187 if (!n->name)
188 return "unknown";
189 else
190 return n->name;
193 static const struct jim_nvp nvp_target_event[] = {
195 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
196 { .value = TARGET_EVENT_HALTED, .name = "halted" },
197 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
198 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
199 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
200 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
201 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
203 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
204 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
206 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
207 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
208 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
209 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
210 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
211 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
212 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
213 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
215 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
216 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
217 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
219 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
220 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
222 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
223 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
225 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
226 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
228 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
229 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
231 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
233 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
234 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
235 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
236 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
237 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
238 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
239 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
240 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },
242 { .name = NULL, .value = -1 }
245 static const struct jim_nvp nvp_target_state[] = {
246 { .name = "unknown", .value = TARGET_UNKNOWN },
247 { .name = "running", .value = TARGET_RUNNING },
248 { .name = "halted", .value = TARGET_HALTED },
249 { .name = "reset", .value = TARGET_RESET },
250 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
251 { .name = NULL, .value = -1 },
254 static const struct jim_nvp nvp_target_debug_reason[] = {
255 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
256 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
257 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
258 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
259 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
260 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
261 { .name = "program-exit", .value = DBG_REASON_EXIT },
262 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
263 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
264 { .name = NULL, .value = -1 },
267 static const struct jim_nvp nvp_target_endian[] = {
268 { .name = "big", .value = TARGET_BIG_ENDIAN },
269 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
270 { .name = "be", .value = TARGET_BIG_ENDIAN },
271 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
272 { .name = NULL, .value = -1 },
275 static const struct jim_nvp nvp_reset_modes[] = {
276 { .name = "unknown", .value = RESET_UNKNOWN },
277 { .name = "run", .value = RESET_RUN },
278 { .name = "halt", .value = RESET_HALT },
279 { .name = "init", .value = RESET_INIT },
280 { .name = NULL, .value = -1 },
283 const char *debug_reason_name(struct target *t)
285 const char *cp;
287 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
288 t->debug_reason)->name;
289 if (!cp) {
290 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
291 cp = "(*BUG*unknown*BUG*)";
293 return cp;
296 const char *target_state_name(struct target *t)
298 const char *cp;
299 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid target state: %d", (int)(t->state));
302 cp = "(*BUG*unknown*BUG*)";
305 if (!target_was_examined(t) && t->defer_examine)
306 cp = "examine deferred";
308 return cp;
311 const char *target_event_name(enum target_event event)
313 const char *cp;
314 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
315 if (!cp) {
316 LOG_ERROR("Invalid target event: %d", (int)(event));
317 cp = "(*BUG*unknown*BUG*)";
319 return cp;
322 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
324 const char *cp;
325 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
326 if (!cp) {
327 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
328 cp = "(*BUG*unknown*BUG*)";
330 return cp;
333 /* determine the number of the new target */
334 static int new_target_number(void)
336 struct target *t;
337 int x;
339 /* number is 0 based */
340 x = -1;
341 t = all_targets;
342 while (t) {
343 if (x < t->target_number)
344 x = t->target_number;
345 t = t->next;
347 return x + 1;
350 static void append_to_list_all_targets(struct target *target)
352 struct target **t = &all_targets;
354 while (*t)
355 t = &((*t)->next);
356 *t = target;
359 /* read a uint64_t from a buffer in target memory endianness */
360 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
362 if (target->endianness == TARGET_LITTLE_ENDIAN)
363 return le_to_h_u64(buffer);
364 else
365 return be_to_h_u64(buffer);
368 /* read a uint32_t from a buffer in target memory endianness */
369 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u32(buffer);
373 else
374 return be_to_h_u32(buffer);
377 /* read a uint24_t from a buffer in target memory endianness */
378 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 return le_to_h_u24(buffer);
382 else
383 return be_to_h_u24(buffer);
386 /* read a uint16_t from a buffer in target memory endianness */
387 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 return le_to_h_u16(buffer);
391 else
392 return be_to_h_u16(buffer);
395 /* write a uint64_t to a buffer in target memory endianness */
396 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u64_to_le(buffer, value);
400 else
401 h_u64_to_be(buffer, value);
404 /* write a uint32_t to a buffer in target memory endianness */
405 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u32_to_le(buffer, value);
409 else
410 h_u32_to_be(buffer, value);
413 /* write a uint24_t to a buffer in target memory endianness */
414 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
416 if (target->endianness == TARGET_LITTLE_ENDIAN)
417 h_u24_to_le(buffer, value);
418 else
419 h_u24_to_be(buffer, value);
422 /* write a uint16_t to a buffer in target memory endianness */
423 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
425 if (target->endianness == TARGET_LITTLE_ENDIAN)
426 h_u16_to_le(buffer, value);
427 else
428 h_u16_to_be(buffer, value);
431 /* write a uint8_t to a buffer in target memory endianness */
432 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
434 *buffer = value;
437 /* write a uint64_t array to a buffer in target memory endianness */
438 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
440 uint32_t i;
441 for (i = 0; i < count; i++)
442 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
445 /* write a uint32_t array to a buffer in target memory endianness */
446 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
453 /* write a uint16_t array to a buffer in target memory endianness */
454 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
461 /* write a uint64_t array to a buffer in target memory endianness */
462 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
469 /* write a uint32_t array to a buffer in target memory endianness */
470 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
477 /* write a uint16_t array to a buffer in target memory endianness */
478 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
485 /* return a pointer to a configured target; id is name or number */
486 struct target *get_target(const char *id)
488 struct target *target;
490 /* try as tcltarget name */
491 for (target = all_targets; target; target = target->next) {
492 if (!target_name(target))
493 continue;
494 if (strcmp(id, target_name(target)) == 0)
495 return target;
498 /* It's OK to remove this fallback sometime after August 2010 or so */
500 /* no match, try as number */
501 unsigned num;
502 if (parse_uint(id, &num) != ERROR_OK)
503 return NULL;
505 for (target = all_targets; target; target = target->next) {
506 if (target->target_number == (int)num) {
507 LOG_WARNING("use '%s' as target identifier, not '%u'",
508 target_name(target), num);
509 return target;
513 return NULL;
516 /* returns a pointer to the n-th configured target */
517 struct target *get_target_by_num(int num)
519 struct target *target = all_targets;
521 while (target) {
522 if (target->target_number == num)
523 return target;
524 target = target->next;
527 return NULL;
530 struct target *get_current_target(struct command_context *cmd_ctx)
532 struct target *target = get_current_target_or_null(cmd_ctx);
534 if (!target) {
535 LOG_ERROR("BUG: current_target out of bounds");
536 exit(-1);
539 return target;
542 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
544 return cmd_ctx->current_target_override
545 ? cmd_ctx->current_target_override
546 : cmd_ctx->current_target;
549 int target_poll(struct target *target)
551 int retval;
553 /* We can't poll until after examine */
554 if (!target_was_examined(target)) {
555 /* Fail silently lest we pollute the log */
556 return ERROR_FAIL;
559 retval = target->type->poll(target);
560 if (retval != ERROR_OK)
561 return retval;
563 if (target->halt_issued) {
564 if (target->state == TARGET_HALTED)
565 target->halt_issued = false;
566 else {
567 int64_t t = timeval_ms() - target->halt_issued_time;
568 if (t > DEFAULT_HALT_TIMEOUT) {
569 target->halt_issued = false;
570 LOG_INFO("Halt timed out, wake up GDB.");
571 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
576 return ERROR_OK;
579 int target_halt(struct target *target)
581 int retval;
582 /* We can't poll until after examine */
583 if (!target_was_examined(target)) {
584 LOG_ERROR("Target not examined yet");
585 return ERROR_FAIL;
588 retval = target->type->halt(target);
589 if (retval != ERROR_OK)
590 return retval;
592 target->halt_issued = true;
593 target->halt_issued_time = timeval_ms();
595 return ERROR_OK;
599 * Make the target (re)start executing using its saved execution
600 * context (possibly with some modifications).
602 * @param target Which target should start executing.
603 * @param current True to use the target's saved program counter instead
604 * of the address parameter
605 * @param address Optionally used as the program counter.
606 * @param handle_breakpoints True iff breakpoints at the resumption PC
607 * should be skipped. (For example, maybe execution was stopped by
608 * such a breakpoint, in which case it would be counterproductive to
609 * let it re-trigger.
610 * @param debug_execution False if all working areas allocated by OpenOCD
611 * should be released and/or restored to their original contents.
612 * (This would for example be true to run some downloaded "helper"
613 * algorithm code, which resides in one such working buffer and uses
614 * another for data storage.)
616 * @todo Resolve the ambiguity about what the "debug_execution" flag
617 * signifies. For example, Target implementations don't agree on how
618 * it relates to invalidation of the register cache, or to whether
619 * breakpoints and watchpoints should be enabled. (It would seem wrong
620 * to enable breakpoints when running downloaded "helper" algorithms
621 * (debug_execution true), since the breakpoints would be set to match
622 * target firmware being debugged, not the helper algorithm.... and
623 * enabling them could cause such helpers to malfunction (for example,
624 * by overwriting data with a breakpoint instruction. On the other
625 * hand the infrastructure for running such helpers might use this
626 * procedure but rely on hardware breakpoint to detect termination.)
628 int target_resume(struct target *target, int current, target_addr_t address,
629 int handle_breakpoints, int debug_execution)
631 int retval;
633 /* We can't poll until after examine */
634 if (!target_was_examined(target)) {
635 LOG_ERROR("Target not examined yet");
636 return ERROR_FAIL;
639 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
641 /* note that resume *must* be asynchronous. The CPU can halt before
642 * we poll. The CPU can even halt at the current PC as a result of
643 * a software breakpoint being inserted by (a bug?) the application.
646 * resume() triggers the event 'resumed'. The execution of TCL commands
647 * in the event handler causes the polling of targets. If the target has
648 * already halted for a breakpoint, polling will run the 'halted' event
649 * handler before the pending 'resumed' handler.
650 * Disable polling during resume() to guarantee the execution of handlers
651 * in the correct order.
653 bool save_poll_mask = jtag_poll_mask();
654 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
655 jtag_poll_unmask(save_poll_mask);
657 if (retval != ERROR_OK)
658 return retval;
660 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
662 return retval;
665 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
667 char buf[100];
668 int retval;
669 struct jim_nvp *n;
670 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
671 if (!n->name) {
672 LOG_ERROR("invalid reset mode");
673 return ERROR_FAIL;
676 struct target *target;
677 for (target = all_targets; target; target = target->next)
678 target_call_reset_callbacks(target, reset_mode);
680 /* disable polling during reset to make reset event scripts
681 * more predictable, i.e. dr/irscan & pathmove in events will
682 * not have JTAG operations injected into the middle of a sequence.
684 bool save_poll_mask = jtag_poll_mask();
686 sprintf(buf, "ocd_process_reset %s", n->name);
687 retval = Jim_Eval(cmd->ctx->interp, buf);
689 jtag_poll_unmask(save_poll_mask);
691 if (retval != JIM_OK) {
692 Jim_MakeErrorMessage(cmd->ctx->interp);
693 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
694 return ERROR_FAIL;
697 /* We want any events to be processed before the prompt */
698 retval = target_call_timer_callbacks_now();
700 for (target = all_targets; target; target = target->next) {
701 target->type->check_reset(target);
702 target->running_alg = false;
705 return retval;
708 static int identity_virt2phys(struct target *target,
709 target_addr_t virtual, target_addr_t *physical)
711 *physical = virtual;
712 return ERROR_OK;
715 static int no_mmu(struct target *target, int *enabled)
717 *enabled = 0;
718 return ERROR_OK;
722 * Reset the @c examined flag for the given target.
723 * Pure paranoia -- targets are zeroed on allocation.
725 static inline void target_reset_examined(struct target *target)
727 target->examined = false;
730 static int default_examine(struct target *target)
732 target_set_examined(target);
733 return ERROR_OK;
736 /* no check by default */
737 static int default_check_reset(struct target *target)
739 return ERROR_OK;
742 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
743 * Keep in sync */
744 int target_examine_one(struct target *target)
746 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
748 int retval = target->type->examine(target);
749 if (retval != ERROR_OK) {
750 target_reset_examined(target);
751 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
752 return retval;
755 target_set_examined(target);
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
758 return ERROR_OK;
761 static int jtag_enable_callback(enum jtag_event event, void *priv)
763 struct target *target = priv;
765 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
766 return ERROR_OK;
768 jtag_unregister_event_callback(jtag_enable_callback, target);
770 return target_examine_one(target);
773 /* Targets that correctly implement init + examine, i.e.
774 * no communication with target during init:
776 * XScale
778 int target_examine(void)
780 int retval = ERROR_OK;
781 struct target *target;
783 for (target = all_targets; target; target = target->next) {
784 /* defer examination, but don't skip it */
785 if (!target->tap->enabled) {
786 jtag_register_event_callback(jtag_enable_callback,
787 target);
788 continue;
791 if (target->defer_examine)
792 continue;
794 int retval2 = target_examine_one(target);
795 if (retval2 != ERROR_OK) {
796 LOG_WARNING("target %s examination failed", target_name(target));
797 retval = retval2;
800 return retval;
803 const char *target_type_name(struct target *target)
805 return target->type->name;
808 static int target_soft_reset_halt(struct target *target)
810 if (!target_was_examined(target)) {
811 LOG_ERROR("Target not examined yet");
812 return ERROR_FAIL;
814 if (!target->type->soft_reset_halt) {
815 LOG_ERROR("Target %s does not support soft_reset_halt",
816 target_name(target));
817 return ERROR_FAIL;
819 return target->type->soft_reset_halt(target);
823 * Downloads a target-specific native code algorithm to the target,
824 * and executes it. * Note that some targets may need to set up, enable,
825 * and tear down a breakpoint (hard or * soft) to detect algorithm
826 * termination, while others may support lower overhead schemes where
827 * soft breakpoints embedded in the algorithm automatically terminate the
828 * algorithm.
830 * @param target used to run the algorithm
831 * @param num_mem_params
832 * @param mem_params
833 * @param num_reg_params
834 * @param reg_param
835 * @param entry_point
836 * @param exit_point
837 * @param timeout_ms
838 * @param arch_info target-specific description of the algorithm.
840 int target_run_algorithm(struct target *target,
841 int num_mem_params, struct mem_param *mem_params,
842 int num_reg_params, struct reg_param *reg_param,
843 target_addr_t entry_point, target_addr_t exit_point,
844 int timeout_ms, void *arch_info)
846 int retval = ERROR_FAIL;
848 if (!target_was_examined(target)) {
849 LOG_ERROR("Target not examined yet");
850 goto done;
852 if (!target->type->run_algorithm) {
853 LOG_ERROR("Target type '%s' does not support %s",
854 target_type_name(target), __func__);
855 goto done;
858 target->running_alg = true;
859 retval = target->type->run_algorithm(target,
860 num_mem_params, mem_params,
861 num_reg_params, reg_param,
862 entry_point, exit_point, timeout_ms, arch_info);
863 target->running_alg = false;
865 done:
866 return retval;
870 * Executes a target-specific native code algorithm and leaves it running.
872 * @param target used to run the algorithm
873 * @param num_mem_params
874 * @param mem_params
875 * @param num_reg_params
876 * @param reg_params
877 * @param entry_point
878 * @param exit_point
879 * @param arch_info target-specific description of the algorithm.
881 int target_start_algorithm(struct target *target,
882 int num_mem_params, struct mem_param *mem_params,
883 int num_reg_params, struct reg_param *reg_params,
884 target_addr_t entry_point, target_addr_t exit_point,
885 void *arch_info)
887 int retval = ERROR_FAIL;
889 if (!target_was_examined(target)) {
890 LOG_ERROR("Target not examined yet");
891 goto done;
893 if (!target->type->start_algorithm) {
894 LOG_ERROR("Target type '%s' does not support %s",
895 target_type_name(target), __func__);
896 goto done;
898 if (target->running_alg) {
899 LOG_ERROR("Target is already running an algorithm");
900 goto done;
903 target->running_alg = true;
904 retval = target->type->start_algorithm(target,
905 num_mem_params, mem_params,
906 num_reg_params, reg_params,
907 entry_point, exit_point, arch_info);
909 done:
910 return retval;
914 * Waits for an algorithm started with target_start_algorithm() to complete.
916 * @param target used to run the algorithm
917 * @param num_mem_params
918 * @param mem_params
919 * @param num_reg_params
920 * @param reg_params
921 * @param exit_point
922 * @param timeout_ms
923 * @param arch_info target-specific description of the algorithm.
925 int target_wait_algorithm(struct target *target,
926 int num_mem_params, struct mem_param *mem_params,
927 int num_reg_params, struct reg_param *reg_params,
928 target_addr_t exit_point, int timeout_ms,
929 void *arch_info)
931 int retval = ERROR_FAIL;
933 if (!target->type->wait_algorithm) {
934 LOG_ERROR("Target type '%s' does not support %s",
935 target_type_name(target), __func__);
936 goto done;
938 if (!target->running_alg) {
939 LOG_ERROR("Target is not running an algorithm");
940 goto done;
943 retval = target->type->wait_algorithm(target,
944 num_mem_params, mem_params,
945 num_reg_params, reg_params,
946 exit_point, timeout_ms, arch_info);
947 if (retval != ERROR_TARGET_TIMEOUT)
948 target->running_alg = false;
950 done:
951 return retval;
955 * Streams data to a circular buffer on target intended for consumption by code
956 * running asynchronously on target.
958 * This is intended for applications where target-specific native code runs
959 * on the target, receives data from the circular buffer, does something with
960 * it (most likely writing it to a flash memory), and advances the circular
961 * buffer pointer.
963 * This assumes that the helper algorithm has already been loaded to the target,
964 * but has not been started yet. Given memory and register parameters are passed
965 * to the algorithm.
967 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
968 * following format:
970 * [buffer_start + 0, buffer_start + 4):
971 * Write Pointer address (aka head). Written and updated by this
972 * routine when new data is written to the circular buffer.
973 * [buffer_start + 4, buffer_start + 8):
974 * Read Pointer address (aka tail). Updated by code running on the
975 * target after it consumes data.
976 * [buffer_start + 8, buffer_start + buffer_size):
977 * Circular buffer contents.
979 * See contrib/loaders/flash/stm32f1x.S for an example.
981 * @param target used to run the algorithm
982 * @param buffer address on the host where data to be sent is located
983 * @param count number of blocks to send
984 * @param block_size size in bytes of each block
985 * @param num_mem_params count of memory-based params to pass to algorithm
986 * @param mem_params memory-based params to pass to algorithm
987 * @param num_reg_params count of register-based params to pass to algorithm
988 * @param reg_params memory-based params to pass to algorithm
989 * @param buffer_start address on the target of the circular buffer structure
990 * @param buffer_size size of the circular buffer structure
991 * @param entry_point address on the target to execute to start the algorithm
992 * @param exit_point address at which to set a breakpoint to catch the
993 * end of the algorithm; can be 0 if target triggers a breakpoint itself
994 * @param arch_info
997 int target_run_flash_async_algorithm(struct target *target,
998 const uint8_t *buffer, uint32_t count, int block_size,
999 int num_mem_params, struct mem_param *mem_params,
1000 int num_reg_params, struct reg_param *reg_params,
1001 uint32_t buffer_start, uint32_t buffer_size,
1002 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1004 int retval;
1005 int timeout = 0;
1007 const uint8_t *buffer_orig = buffer;
1009 /* Set up working area. First word is write pointer, second word is read pointer,
1010 * rest is fifo data area. */
1011 uint32_t wp_addr = buffer_start;
1012 uint32_t rp_addr = buffer_start + 4;
1013 uint32_t fifo_start_addr = buffer_start + 8;
1014 uint32_t fifo_end_addr = buffer_start + buffer_size;
1016 uint32_t wp = fifo_start_addr;
1017 uint32_t rp = fifo_start_addr;
1019 /* validate block_size is 2^n */
1020 assert(IS_PWR_OF_2(block_size));
1022 retval = target_write_u32(target, wp_addr, wp);
1023 if (retval != ERROR_OK)
1024 return retval;
1025 retval = target_write_u32(target, rp_addr, rp);
1026 if (retval != ERROR_OK)
1027 return retval;
1029 /* Start up algorithm on target and let it idle while writing the first chunk */
1030 retval = target_start_algorithm(target, num_mem_params, mem_params,
1031 num_reg_params, reg_params,
1032 entry_point,
1033 exit_point,
1034 arch_info);
1036 if (retval != ERROR_OK) {
1037 LOG_ERROR("error starting target flash write algorithm");
1038 return retval;
1041 while (count > 0) {
1043 retval = target_read_u32(target, rp_addr, &rp);
1044 if (retval != ERROR_OK) {
1045 LOG_ERROR("failed to get read pointer");
1046 break;
1049 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1050 (size_t) (buffer - buffer_orig), count, wp, rp);
1052 if (rp == 0) {
1053 LOG_ERROR("flash write algorithm aborted by target");
1054 retval = ERROR_FLASH_OPERATION_FAILED;
1055 break;
1058 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1059 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1060 break;
1063 /* Count the number of bytes available in the fifo without
1064 * crossing the wrap around. Make sure to not fill it completely,
1065 * because that would make wp == rp and that's the empty condition. */
1066 uint32_t thisrun_bytes;
1067 if (rp > wp)
1068 thisrun_bytes = rp - wp - block_size;
1069 else if (rp > fifo_start_addr)
1070 thisrun_bytes = fifo_end_addr - wp;
1071 else
1072 thisrun_bytes = fifo_end_addr - wp - block_size;
1074 if (thisrun_bytes == 0) {
1075 /* Throttle polling a bit if transfer is (much) faster than flash
1076 * programming. The exact delay shouldn't matter as long as it's
1077 * less than buffer size / flash speed. This is very unlikely to
1078 * run when using high latency connections such as USB. */
1079 alive_sleep(2);
1081 /* to stop an infinite loop on some targets check and increment a timeout
1082 * this issue was observed on a stellaris using the new ICDI interface */
1083 if (timeout++ >= 2500) {
1084 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1085 return ERROR_FLASH_OPERATION_FAILED;
1087 continue;
1090 /* reset our timeout */
1091 timeout = 0;
1093 /* Limit to the amount of data we actually want to write */
1094 if (thisrun_bytes > count * block_size)
1095 thisrun_bytes = count * block_size;
1097 /* Force end of large blocks to be word aligned */
1098 if (thisrun_bytes >= 16)
1099 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1101 /* Write data to fifo */
1102 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1103 if (retval != ERROR_OK)
1104 break;
1106 /* Update counters and wrap write pointer */
1107 buffer += thisrun_bytes;
1108 count -= thisrun_bytes / block_size;
1109 wp += thisrun_bytes;
1110 if (wp >= fifo_end_addr)
1111 wp = fifo_start_addr;
1113 /* Store updated write pointer to target */
1114 retval = target_write_u32(target, wp_addr, wp);
1115 if (retval != ERROR_OK)
1116 break;
1118 /* Avoid GDB timeouts */
1119 keep_alive();
1122 if (retval != ERROR_OK) {
1123 /* abort flash write algorithm on target */
1124 target_write_u32(target, wp_addr, 0);
1127 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1128 num_reg_params, reg_params,
1129 exit_point,
1130 10000,
1131 arch_info);
1133 if (retval2 != ERROR_OK) {
1134 LOG_ERROR("error waiting for target flash write algorithm");
1135 retval = retval2;
1138 if (retval == ERROR_OK) {
1139 /* check if algorithm set rp = 0 after fifo writer loop finished */
1140 retval = target_read_u32(target, rp_addr, &rp);
1141 if (retval == ERROR_OK && rp == 0) {
1142 LOG_ERROR("flash write algorithm aborted by target");
1143 retval = ERROR_FLASH_OPERATION_FAILED;
1147 return retval;
1150 int target_run_read_async_algorithm(struct target *target,
1151 uint8_t *buffer, uint32_t count, int block_size,
1152 int num_mem_params, struct mem_param *mem_params,
1153 int num_reg_params, struct reg_param *reg_params,
1154 uint32_t buffer_start, uint32_t buffer_size,
1155 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1157 int retval;
1158 int timeout = 0;
1160 const uint8_t *buffer_orig = buffer;
1162 /* Set up working area. First word is write pointer, second word is read pointer,
1163 * rest is fifo data area. */
1164 uint32_t wp_addr = buffer_start;
1165 uint32_t rp_addr = buffer_start + 4;
1166 uint32_t fifo_start_addr = buffer_start + 8;
1167 uint32_t fifo_end_addr = buffer_start + buffer_size;
1169 uint32_t wp = fifo_start_addr;
1170 uint32_t rp = fifo_start_addr;
1172 /* validate block_size is 2^n */
1173 assert(IS_PWR_OF_2(block_size));
1175 retval = target_write_u32(target, wp_addr, wp);
1176 if (retval != ERROR_OK)
1177 return retval;
1178 retval = target_write_u32(target, rp_addr, rp);
1179 if (retval != ERROR_OK)
1180 return retval;
1182 /* Start up algorithm on target */
1183 retval = target_start_algorithm(target, num_mem_params, mem_params,
1184 num_reg_params, reg_params,
1185 entry_point,
1186 exit_point,
1187 arch_info);
1189 if (retval != ERROR_OK) {
1190 LOG_ERROR("error starting target flash read algorithm");
1191 return retval;
1194 while (count > 0) {
1195 retval = target_read_u32(target, wp_addr, &wp);
1196 if (retval != ERROR_OK) {
1197 LOG_ERROR("failed to get write pointer");
1198 break;
1201 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1202 (size_t)(buffer - buffer_orig), count, wp, rp);
1204 if (wp == 0) {
1205 LOG_ERROR("flash read algorithm aborted by target");
1206 retval = ERROR_FLASH_OPERATION_FAILED;
1207 break;
1210 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1211 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1212 break;
1215 /* Count the number of bytes available in the fifo without
1216 * crossing the wrap around. */
1217 uint32_t thisrun_bytes;
1218 if (wp >= rp)
1219 thisrun_bytes = wp - rp;
1220 else
1221 thisrun_bytes = fifo_end_addr - rp;
1223 if (thisrun_bytes == 0) {
1224 /* Throttle polling a bit if transfer is (much) faster than flash
1225 * reading. The exact delay shouldn't matter as long as it's
1226 * less than buffer size / flash speed. This is very unlikely to
1227 * run when using high latency connections such as USB. */
1228 alive_sleep(2);
1230 /* to stop an infinite loop on some targets check and increment a timeout
1231 * this issue was observed on a stellaris using the new ICDI interface */
1232 if (timeout++ >= 2500) {
1233 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1234 return ERROR_FLASH_OPERATION_FAILED;
1236 continue;
1239 /* Reset our timeout */
1240 timeout = 0;
1242 /* Limit to the amount of data we actually want to read */
1243 if (thisrun_bytes > count * block_size)
1244 thisrun_bytes = count * block_size;
1246 /* Force end of large blocks to be word aligned */
1247 if (thisrun_bytes >= 16)
1248 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1250 /* Read data from fifo */
1251 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1252 if (retval != ERROR_OK)
1253 break;
1255 /* Update counters and wrap write pointer */
1256 buffer += thisrun_bytes;
1257 count -= thisrun_bytes / block_size;
1258 rp += thisrun_bytes;
1259 if (rp >= fifo_end_addr)
1260 rp = fifo_start_addr;
1262 /* Store updated write pointer to target */
1263 retval = target_write_u32(target, rp_addr, rp);
1264 if (retval != ERROR_OK)
1265 break;
1267 /* Avoid GDB timeouts */
1268 keep_alive();
1272 if (retval != ERROR_OK) {
1273 /* abort flash write algorithm on target */
1274 target_write_u32(target, rp_addr, 0);
1277 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1278 num_reg_params, reg_params,
1279 exit_point,
1280 10000,
1281 arch_info);
1283 if (retval2 != ERROR_OK) {
1284 LOG_ERROR("error waiting for target flash write algorithm");
1285 retval = retval2;
1288 if (retval == ERROR_OK) {
1289 /* check if algorithm set wp = 0 after fifo writer loop finished */
1290 retval = target_read_u32(target, wp_addr, &wp);
1291 if (retval == ERROR_OK && wp == 0) {
1292 LOG_ERROR("flash read algorithm aborted by target");
1293 retval = ERROR_FLASH_OPERATION_FAILED;
1297 return retval;
1300 int target_read_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1307 if (!target->type->read_memory) {
1308 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1309 return ERROR_FAIL;
1311 return target->type->read_memory(target, address, size, count, buffer);
1314 int target_read_phys_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1321 if (!target->type->read_phys_memory) {
1322 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1323 return ERROR_FAIL;
1325 return target->type->read_phys_memory(target, address, size, count, buffer);
1328 int target_write_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1335 if (!target->type->write_memory) {
1336 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1337 return ERROR_FAIL;
1339 return target->type->write_memory(target, address, size, count, buffer);
1342 int target_write_phys_memory(struct target *target,
1343 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1345 if (!target_was_examined(target)) {
1346 LOG_ERROR("Target not examined yet");
1347 return ERROR_FAIL;
1349 if (!target->type->write_phys_memory) {
1350 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1351 return ERROR_FAIL;
1353 return target->type->write_phys_memory(target, address, size, count, buffer);
1356 int target_add_breakpoint(struct target *target,
1357 struct breakpoint *breakpoint)
1359 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1360 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1361 return ERROR_TARGET_NOT_HALTED;
1363 return target->type->add_breakpoint(target, breakpoint);
1366 int target_add_context_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1369 if (target->state != TARGET_HALTED) {
1370 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1373 return target->type->add_context_breakpoint(target, breakpoint);
1376 int target_add_hybrid_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1383 return target->type->add_hybrid_breakpoint(target, breakpoint);
1386 int target_remove_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1389 return target->type->remove_breakpoint(target, breakpoint);
1392 int target_add_watchpoint(struct target *target,
1393 struct watchpoint *watchpoint)
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1397 return ERROR_TARGET_NOT_HALTED;
1399 return target->type->add_watchpoint(target, watchpoint);
1401 int target_remove_watchpoint(struct target *target,
1402 struct watchpoint *watchpoint)
1404 return target->type->remove_watchpoint(target, watchpoint);
1406 int target_hit_watchpoint(struct target *target,
1407 struct watchpoint **hit_watchpoint)
1409 if (target->state != TARGET_HALTED) {
1410 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1411 return ERROR_TARGET_NOT_HALTED;
1414 if (!target->type->hit_watchpoint) {
1415 /* For backward compatible, if hit_watchpoint is not implemented,
1416 * return ERROR_FAIL such that gdb_server will not take the nonsense
1417 * information. */
1418 return ERROR_FAIL;
1421 return target->type->hit_watchpoint(target, hit_watchpoint);
1424 const char *target_get_gdb_arch(struct target *target)
1426 if (!target->type->get_gdb_arch)
1427 return NULL;
1428 return target->type->get_gdb_arch(target);
1431 int target_get_gdb_reg_list(struct target *target,
1432 struct reg **reg_list[], int *reg_list_size,
1433 enum target_register_class reg_class)
1435 int result = ERROR_FAIL;
1437 if (!target_was_examined(target)) {
1438 LOG_ERROR("Target not examined yet");
1439 goto done;
1442 result = target->type->get_gdb_reg_list(target, reg_list,
1443 reg_list_size, reg_class);
1445 done:
1446 if (result != ERROR_OK) {
1447 *reg_list = NULL;
1448 *reg_list_size = 0;
1450 return result;
1453 int target_get_gdb_reg_list_noread(struct target *target,
1454 struct reg **reg_list[], int *reg_list_size,
1455 enum target_register_class reg_class)
1457 if (target->type->get_gdb_reg_list_noread &&
1458 target->type->get_gdb_reg_list_noread(target, reg_list,
1459 reg_list_size, reg_class) == ERROR_OK)
1460 return ERROR_OK;
1461 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1464 bool target_supports_gdb_connection(struct target *target)
1467 * exclude all the targets that don't provide get_gdb_reg_list
1468 * or that have explicit gdb_max_connection == 0
1470 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1473 int target_step(struct target *target,
1474 int current, target_addr_t address, int handle_breakpoints)
1476 int retval;
1478 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1480 retval = target->type->step(target, current, address, handle_breakpoints);
1481 if (retval != ERROR_OK)
1482 return retval;
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1486 return retval;
1489 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1491 if (target->state != TARGET_HALTED) {
1492 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1493 return ERROR_TARGET_NOT_HALTED;
1495 return target->type->get_gdb_fileio_info(target, fileio_info);
1498 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1500 if (target->state != TARGET_HALTED) {
1501 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1502 return ERROR_TARGET_NOT_HALTED;
1504 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1507 target_addr_t target_address_max(struct target *target)
1509 unsigned bits = target_address_bits(target);
1510 if (sizeof(target_addr_t) * 8 == bits)
1511 return (target_addr_t) -1;
1512 else
1513 return (((target_addr_t) 1) << bits) - 1;
1516 unsigned target_address_bits(struct target *target)
1518 if (target->type->address_bits)
1519 return target->type->address_bits(target);
1520 return 32;
1523 unsigned int target_data_bits(struct target *target)
1525 if (target->type->data_bits)
1526 return target->type->data_bits(target);
1527 return 32;
1530 static int target_profiling(struct target *target, uint32_t *samples,
1531 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1533 return target->type->profiling(target, samples, max_num_samples,
1534 num_samples, seconds);
1537 static int handle_target(void *priv);
1539 static int target_init_one(struct command_context *cmd_ctx,
1540 struct target *target)
1542 target_reset_examined(target);
1544 struct target_type *type = target->type;
1545 if (!type->examine)
1546 type->examine = default_examine;
1548 if (!type->check_reset)
1549 type->check_reset = default_check_reset;
1551 assert(type->init_target);
1553 int retval = type->init_target(cmd_ctx, target);
1554 if (retval != ERROR_OK) {
1555 LOG_ERROR("target '%s' init failed", target_name(target));
1556 return retval;
1559 /* Sanity-check MMU support ... stub in what we must, to help
1560 * implement it in stages, but warn if we need to do so.
1562 if (type->mmu) {
1563 if (!type->virt2phys) {
1564 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1565 type->virt2phys = identity_virt2phys;
1567 } else {
1568 /* Make sure no-MMU targets all behave the same: make no
1569 * distinction between physical and virtual addresses, and
1570 * ensure that virt2phys() is always an identity mapping.
1572 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1573 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1575 type->mmu = no_mmu;
1576 type->write_phys_memory = type->write_memory;
1577 type->read_phys_memory = type->read_memory;
1578 type->virt2phys = identity_virt2phys;
1581 if (!target->type->read_buffer)
1582 target->type->read_buffer = target_read_buffer_default;
1584 if (!target->type->write_buffer)
1585 target->type->write_buffer = target_write_buffer_default;
1587 if (!target->type->get_gdb_fileio_info)
1588 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1590 if (!target->type->gdb_fileio_end)
1591 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1593 if (!target->type->profiling)
1594 target->type->profiling = target_profiling_default;
1596 return ERROR_OK;
1599 static int target_init(struct command_context *cmd_ctx)
1601 struct target *target;
1602 int retval;
1604 for (target = all_targets; target; target = target->next) {
1605 retval = target_init_one(cmd_ctx, target);
1606 if (retval != ERROR_OK)
1607 return retval;
1610 if (!all_targets)
1611 return ERROR_OK;
1613 retval = target_register_user_commands(cmd_ctx);
1614 if (retval != ERROR_OK)
1615 return retval;
1617 retval = target_register_timer_callback(&handle_target,
1618 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1619 if (retval != ERROR_OK)
1620 return retval;
1622 return ERROR_OK;
1625 COMMAND_HANDLER(handle_target_init_command)
1627 int retval;
1629 if (CMD_ARGC != 0)
1630 return ERROR_COMMAND_SYNTAX_ERROR;
1632 static bool target_initialized;
1633 if (target_initialized) {
1634 LOG_INFO("'target init' has already been called");
1635 return ERROR_OK;
1637 target_initialized = true;
1639 retval = command_run_line(CMD_CTX, "init_targets");
1640 if (retval != ERROR_OK)
1641 return retval;
1643 retval = command_run_line(CMD_CTX, "init_target_events");
1644 if (retval != ERROR_OK)
1645 return retval;
1647 retval = command_run_line(CMD_CTX, "init_board");
1648 if (retval != ERROR_OK)
1649 return retval;
1651 LOG_DEBUG("Initializing targets...");
1652 return target_init(CMD_CTX);
1655 int target_register_event_callback(int (*callback)(struct target *target,
1656 enum target_event event, void *priv), void *priv)
1658 struct target_event_callback **callbacks_p = &target_event_callbacks;
1660 if (!callback)
1661 return ERROR_COMMAND_SYNTAX_ERROR;
1663 if (*callbacks_p) {
1664 while ((*callbacks_p)->next)
1665 callbacks_p = &((*callbacks_p)->next);
1666 callbacks_p = &((*callbacks_p)->next);
1669 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1670 (*callbacks_p)->callback = callback;
1671 (*callbacks_p)->priv = priv;
1672 (*callbacks_p)->next = NULL;
1674 return ERROR_OK;
1677 int target_register_reset_callback(int (*callback)(struct target *target,
1678 enum target_reset_mode reset_mode, void *priv), void *priv)
1680 struct target_reset_callback *entry;
1682 if (!callback)
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1685 entry = malloc(sizeof(struct target_reset_callback));
1686 if (!entry) {
1687 LOG_ERROR("error allocating buffer for reset callback entry");
1688 return ERROR_COMMAND_SYNTAX_ERROR;
1691 entry->callback = callback;
1692 entry->priv = priv;
1693 list_add(&entry->list, &target_reset_callback_list);
1696 return ERROR_OK;
1699 int target_register_trace_callback(int (*callback)(struct target *target,
1700 size_t len, uint8_t *data, void *priv), void *priv)
1702 struct target_trace_callback *entry;
1704 if (!callback)
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1707 entry = malloc(sizeof(struct target_trace_callback));
1708 if (!entry) {
1709 LOG_ERROR("error allocating buffer for trace callback entry");
1710 return ERROR_COMMAND_SYNTAX_ERROR;
1713 entry->callback = callback;
1714 entry->priv = priv;
1715 list_add(&entry->list, &target_trace_callback_list);
1718 return ERROR_OK;
1721 int target_register_timer_callback(int (*callback)(void *priv),
1722 unsigned int time_ms, enum target_timer_type type, void *priv)
1724 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1726 if (!callback)
1727 return ERROR_COMMAND_SYNTAX_ERROR;
1729 if (*callbacks_p) {
1730 while ((*callbacks_p)->next)
1731 callbacks_p = &((*callbacks_p)->next);
1732 callbacks_p = &((*callbacks_p)->next);
1735 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1736 (*callbacks_p)->callback = callback;
1737 (*callbacks_p)->type = type;
1738 (*callbacks_p)->time_ms = time_ms;
1739 (*callbacks_p)->removed = false;
1741 (*callbacks_p)->when = timeval_ms() + time_ms;
1742 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1744 (*callbacks_p)->priv = priv;
1745 (*callbacks_p)->next = NULL;
1747 return ERROR_OK;
1750 int target_unregister_event_callback(int (*callback)(struct target *target,
1751 enum target_event event, void *priv), void *priv)
1753 struct target_event_callback **p = &target_event_callbacks;
1754 struct target_event_callback *c = target_event_callbacks;
1756 if (!callback)
1757 return ERROR_COMMAND_SYNTAX_ERROR;
1759 while (c) {
1760 struct target_event_callback *next = c->next;
1761 if ((c->callback == callback) && (c->priv == priv)) {
1762 *p = next;
1763 free(c);
1764 return ERROR_OK;
1765 } else
1766 p = &(c->next);
1767 c = next;
1770 return ERROR_OK;
1773 int target_unregister_reset_callback(int (*callback)(struct target *target,
1774 enum target_reset_mode reset_mode, void *priv), void *priv)
1776 struct target_reset_callback *entry;
1778 if (!callback)
1779 return ERROR_COMMAND_SYNTAX_ERROR;
1781 list_for_each_entry(entry, &target_reset_callback_list, list) {
1782 if (entry->callback == callback && entry->priv == priv) {
1783 list_del(&entry->list);
1784 free(entry);
1785 break;
1789 return ERROR_OK;
1792 int target_unregister_trace_callback(int (*callback)(struct target *target,
1793 size_t len, uint8_t *data, void *priv), void *priv)
1795 struct target_trace_callback *entry;
1797 if (!callback)
1798 return ERROR_COMMAND_SYNTAX_ERROR;
1800 list_for_each_entry(entry, &target_trace_callback_list, list) {
1801 if (entry->callback == callback && entry->priv == priv) {
1802 list_del(&entry->list);
1803 free(entry);
1804 break;
1808 return ERROR_OK;
1811 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1813 if (!callback)
1814 return ERROR_COMMAND_SYNTAX_ERROR;
1816 for (struct target_timer_callback *c = target_timer_callbacks;
1817 c; c = c->next) {
1818 if ((c->callback == callback) && (c->priv == priv)) {
1819 c->removed = true;
1820 return ERROR_OK;
1824 return ERROR_FAIL;
1827 int target_call_event_callbacks(struct target *target, enum target_event event)
1829 struct target_event_callback *callback = target_event_callbacks;
1830 struct target_event_callback *next_callback;
1832 if (event == TARGET_EVENT_HALTED) {
1833 /* execute early halted first */
1834 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1837 LOG_DEBUG("target event %i (%s) for core %s", event,
1838 target_event_name(event),
1839 target_name(target));
1841 target_handle_event(target, event);
1843 while (callback) {
1844 next_callback = callback->next;
1845 callback->callback(target, event, callback->priv);
1846 callback = next_callback;
1849 return ERROR_OK;
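/*
 * Illustrative sketch, not part of the original file: the shape of a handler
 * invoked by the dispatcher above.  example_on_event() is hypothetical; it is
 * registered with target_register_event_callback(), the counterpart of
 * target_unregister_event_callback() above.
 *
 *	static int example_on_event(struct target *target,
 *		enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_DEBUG("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(example_on_event, NULL);
 */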
1852 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1854 struct target_reset_callback *callback;
1856 LOG_DEBUG("target reset %i (%s)", reset_mode,
1857 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1859 list_for_each_entry(callback, &target_reset_callback_list, list)
1860 callback->callback(target, reset_mode, callback->priv);
1862 return ERROR_OK;
1865 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1867 struct target_trace_callback *callback;
1869 list_for_each_entry(callback, &target_trace_callback_list, list)
1870 callback->callback(target, len, data, callback->priv);
1872 return ERROR_OK;
1875 static int target_timer_callback_periodic_restart(
1876 struct target_timer_callback *cb, int64_t *now)
1878 cb->when = *now + cb->time_ms;
1879 return ERROR_OK;
1882 static int target_call_timer_callback(struct target_timer_callback *cb,
1883 int64_t *now)
1885 cb->callback(cb->priv);
1887 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1888 return target_timer_callback_periodic_restart(cb, now);
1890 return target_unregister_timer_callback(cb->callback, cb->priv);
1893 static int target_call_timer_callbacks_check_time(int checktime)
1895 static bool callback_processing;
1897 /* Do not allow nesting */
1898 if (callback_processing)
1899 return ERROR_OK;
1901 callback_processing = true;
1903 keep_alive();
1905 int64_t now = timeval_ms();
1907 /* Initialize to a default value that's a ways into the future.
1908 * The loop below will make it closer to now if there are
1909 * callbacks that want to be called sooner. */
1910 target_timer_next_event_value = now + 1000;
1912 /* Store an address of the place containing a pointer to the
1913 * next item; initially, that's a standalone "root of the
1914 * list" variable. */
1915 struct target_timer_callback **callback = &target_timer_callbacks;
1916 while (callback && *callback) {
1917 if ((*callback)->removed) {
1918 struct target_timer_callback *p = *callback;
1919 *callback = (*callback)->next;
1920 free(p);
1921 continue;
1924 bool call_it = (*callback)->callback &&
1925 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1926 now >= (*callback)->when);
1928 if (call_it)
1929 target_call_timer_callback(*callback, &now);
1931 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1932 target_timer_next_event_value = (*callback)->when;
1934 callback = &(*callback)->next;
1937 callback_processing = false;
1938 return ERROR_OK;
1941 int target_call_timer_callbacks()
1943 return target_call_timer_callbacks_check_time(1);
1946 /* invoke periodic callbacks immediately */
1947 int target_call_timer_callbacks_now()
1949 return target_call_timer_callbacks_check_time(0);
1952 int64_t target_timer_next_event(void)
1954 return target_timer_next_event_value;
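/*
 * Illustrative sketch, not part of the original file: how a polling loop
 * might combine the two entry points above so it never sleeps past the next
 * timer deadline.  The surrounding loop and wait primitive are hypothetical.
 *
 *	int64_t now = timeval_ms();
 *	int64_t next = target_timer_next_event();
 *	int64_t timeout_ms = (next > now) ? (next - now) : 0;
 *	// ... wait for I/O for at most timeout_ms, then ...
 *	target_call_timer_callbacks();
 */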
1957 /* Prints the working area layout for debug purposes */
1958 static void print_wa_layout(struct target *target)
1960 struct working_area *c = target->working_areas;
1962 while (c) {
1963 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1964 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1965 c->address, c->address + c->size - 1, c->size);
1966 c = c->next;
1970 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1971 static void target_split_working_area(struct working_area *area, uint32_t size)
1973 assert(area->free); /* Shouldn't split an allocated area */
1974 assert(size <= area->size); /* Caller should guarantee this */
1976 /* Split only if not already the right size */
1977 if (size < area->size) {
1978 struct working_area *new_wa = malloc(sizeof(*new_wa));
1980 if (!new_wa)
1981 return;
1983 new_wa->next = area->next;
1984 new_wa->size = area->size - size;
1985 new_wa->address = area->address + size;
1986 new_wa->backup = NULL;
1987 new_wa->user = NULL;
1988 new_wa->free = true;
1990 area->next = new_wa;
1991 area->size = size;
1993 /* If backup memory was allocated to this area, it now has the wrong
1994 * size, so free it; it will be reallocated if/when needed */
1995 free(area->backup);
1996 area->backup = NULL;
2000 /* Merge all adjacent free areas into one */
2001 static void target_merge_working_areas(struct target *target)
2003 struct working_area *c = target->working_areas;
2005 while (c && c->next) {
2006 assert(c->next->address == c->address + c->size); /* This is an invariant */
2008 /* Find two adjacent free areas */
2009 if (c->free && c->next->free) {
2010 /* Merge the last into the first */
2011 c->size += c->next->size;
2013 /* Remove the last */
2014 struct working_area *to_be_freed = c->next;
2015 c->next = c->next->next;
2016 free(to_be_freed->backup);
2017 free(to_be_freed);
2019 /* If backup memory was allocated to the remaining area, it now has
2020 * the wrong size */
2021 free(c->backup);
2022 c->backup = NULL;
2023 } else {
2024 c = c->next;
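/*
 * Worked example (illustrative, addresses are hypothetical): starting from a
 * single free area 0x20000000-0x20000fff, target_split_working_area(area, 0x100)
 * leaves 0x20000000-0x200000ff (which the allocator then marks as used) followed
 * by a new free area 0x20000100-0x20000fff.  Once the first chunk is freed again,
 * target_merge_working_areas() collapses the two adjacent free areas back into
 * the original 4 KiB area.
 */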
2029 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2031 /* Re-evaluate the working area address based on the MMU state */
2032 if (!target->working_areas) {
2033 int retval;
2034 int enabled;
2036 retval = target->type->mmu(target, &enabled);
2037 if (retval != ERROR_OK)
2038 return retval;
2040 if (!enabled) {
2041 if (target->working_area_phys_spec) {
2042 LOG_DEBUG("MMU disabled, using physical "
2043 "address for working memory " TARGET_ADDR_FMT,
2044 target->working_area_phys);
2045 target->working_area = target->working_area_phys;
2046 } else {
2047 LOG_ERROR("No working memory available. "
2048 "Specify -work-area-phys to target.");
2049 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2051 } else {
2052 if (target->working_area_virt_spec) {
2053 LOG_DEBUG("MMU enabled, using virtual "
2054 "address for working memory " TARGET_ADDR_FMT,
2055 target->working_area_virt);
2056 target->working_area = target->working_area_virt;
2057 } else {
2058 LOG_ERROR("No working memory available. "
2059 "Specify -work-area-virt to target.");
2060 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2064 /* Set up initial working area on first call */
2065 struct working_area *new_wa = malloc(sizeof(*new_wa));
2066 if (new_wa) {
2067 new_wa->next = NULL;
2068 new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
2069 new_wa->address = target->working_area;
2070 new_wa->backup = NULL;
2071 new_wa->user = NULL;
2072 new_wa->free = true;
2075 target->working_areas = new_wa;
2078 /* only allocate multiples of 4 bytes */
2079 size = ALIGN_UP(size, 4);
2081 struct working_area *c = target->working_areas;
2083 /* Find the first large enough working area */
2084 while (c) {
2085 if (c->free && c->size >= size)
2086 break;
2087 c = c->next;
2090 if (!c)
2091 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2093 /* Split the working area into the requested size */
2094 target_split_working_area(c, size);
2096 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2097 size, c->address);
2099 if (target->backup_working_area) {
2100 if (!c->backup) {
2101 c->backup = malloc(c->size);
2102 if (!c->backup)
2103 return ERROR_FAIL;
2106 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2107 if (retval != ERROR_OK)
2108 return retval;
2111 /* mark as used, and return the new (reused) area */
2112 c->free = false;
2113 *area = c;
2115 /* remember the caller's pointer so it can be cleared when the area is freed */
2116 c->user = area;
2118 print_wa_layout(target);
2120 return ERROR_OK;
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2125 int retval;
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2130 return retval;
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2136 int retval = ERROR_OK;
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2145 return retval;
2148 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2149 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2151 if (!area || area->free)
2152 return ERROR_OK;
2154 int retval = ERROR_OK;
2155 if (restore) {
2156 retval = target_restore_working_area(target, area);
2157 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2158 if (retval != ERROR_OK)
2159 return retval;
2162 area->free = true;
2164 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2165 area->size, area->address);
2167 /* mark user pointer invalid */
2168 /* TODO: Is this really safe? It points to some previous caller's memory.
2169 * How could we know that the area pointer is still in that place and not
2170 * some other vital data? What's the purpose of this, anyway? */
2171 *area->user = NULL;
2172 area->user = NULL;
2174 target_merge_working_areas(target);
2176 print_wa_layout(target);
2178 return retval;
2181 int target_free_working_area(struct target *target, struct working_area *area)
2183 return target_free_working_area_restore(target, area, 1);
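/*
 * Illustrative sketch, not part of the original file: the usual
 * allocate/use/free pattern for a scratch buffer in target RAM.  The 256-byte
 * size and the data buffer are hypothetical.
 *
 *	struct working_area *wa = NULL;
 *	int retval = target_alloc_working_area(target, 256, &wa);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_write_buffer(target, wa->address, 256, data);
 *	// ... run an algorithm, read results back, etc. ...
 *	target_free_working_area(target, wa);
 *
 * The allocator stores the caller's &wa (see c->user above), so the pointer is
 * cleared automatically if the area is released elsewhere.
 */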
2186 /* free resources and restore memory; if restoring memory fails,
2187 * free up the resources anyway
2189 static void target_free_all_working_areas_restore(struct target *target, int restore)
2191 struct working_area *c = target->working_areas;
2193 LOG_DEBUG("freeing all working areas");
2195 /* Loop through all areas, restoring the allocated ones and marking them as free */
2196 while (c) {
2197 if (!c->free) {
2198 if (restore)
2199 target_restore_working_area(target, c);
2200 c->free = true;
2201 *c->user = NULL; /* Same as above */
2202 c->user = NULL;
2204 c = c->next;
2207 /* Run a merge pass to combine all areas into one */
2208 target_merge_working_areas(target);
2210 print_wa_layout(target);
2213 void target_free_all_working_areas(struct target *target)
2215 target_free_all_working_areas_restore(target, 1);
2217 /* Now we have none or only one working area marked as free */
2218 if (target->working_areas) {
2219 /* Free the last one to allow on-the-fly moving and resizing */
2220 free(target->working_areas->backup);
2221 free(target->working_areas);
2222 target->working_areas = NULL;
2226 /* Find the largest number of bytes that can be allocated */
2227 uint32_t target_get_working_area_avail(struct target *target)
2229 struct working_area *c = target->working_areas;
2230 uint32_t max_size = 0;
2232 if (!c)
2233 return ALIGN_DOWN(target->working_area_size, 4);
2235 while (c) {
2236 if (c->free && max_size < c->size)
2237 max_size = c->size;
2239 c = c->next;
2242 return max_size;
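/*
 * Illustrative sketch, not part of the original file: sizing a transfer
 * buffer from whatever working area is left, as flash drivers commonly do.
 * The 128-byte minimum is hypothetical.
 *
 *	uint32_t buffer_size = target_get_working_area_avail(target);
 *	struct working_area *buffer_wa;
 *	if (buffer_size < 128 ||
 *			target_alloc_working_area_try(target, buffer_size, &buffer_wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 */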
2245 static void target_destroy(struct target *target)
2247 if (target->type->deinit_target)
2248 target->type->deinit_target(target);
2250 if (target->semihosting)
2251 free(target->semihosting->basedir);
2252 free(target->semihosting);
2254 jtag_unregister_event_callback(jtag_enable_callback, target);
2256 struct target_event_action *teap = target->event_action;
2257 while (teap) {
2258 struct target_event_action *next = teap->next;
2259 Jim_DecrRefCount(teap->interp, teap->body);
2260 free(teap);
2261 teap = next;
2264 target_free_all_working_areas(target);
2266 /* release the target's SMP list */
2267 if (target->smp) {
2268 struct target_list *head, *tmp;
2270 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2271 list_del(&head->lh);
2272 head->target->smp = 0;
2273 free(head);
2275 if (target->smp_targets != &empty_smp_targets)
2276 free(target->smp_targets);
2277 target->smp = 0;
2280 rtos_destroy(target);
2282 free(target->gdb_port_override);
2283 free(target->type);
2284 free(target->trace_info);
2285 free(target->fileio_info);
2286 free(target->cmd_name);
2287 free(target);
2290 void target_quit(void)
2292 struct target_event_callback *pe = target_event_callbacks;
2293 while (pe) {
2294 struct target_event_callback *t = pe->next;
2295 free(pe);
2296 pe = t;
2298 target_event_callbacks = NULL;
2300 struct target_timer_callback *pt = target_timer_callbacks;
2301 while (pt) {
2302 struct target_timer_callback *t = pt->next;
2303 free(pt);
2304 pt = t;
2306 target_timer_callbacks = NULL;
2308 for (struct target *target = all_targets; target;) {
2309 struct target *tmp;
2311 tmp = target->next;
2312 target_destroy(target);
2313 target = tmp;
2316 all_targets = NULL;
2319 int target_arch_state(struct target *target)
2321 int retval;
2322 if (!target) {
2323 LOG_WARNING("No target has been configured");
2324 return ERROR_OK;
2327 if (target->state != TARGET_HALTED)
2328 return ERROR_OK;
2330 retval = target->type->arch_state(target);
2331 return retval;
2334 static int target_get_gdb_fileio_info_default(struct target *target,
2335 struct gdb_fileio_info *fileio_info)
2337 /* If the target does not support semihosting, it has no need to
2338 provide a .get_gdb_fileio_info callback.
2339 Simply returning ERROR_FAIL makes gdb_server report "Txx"
2340 (target halted) every time. */
2341 return ERROR_FAIL;
2344 static int target_gdb_fileio_end_default(struct target *target,
2345 int retcode, int fileio_errno, bool ctrl_c)
2347 return ERROR_OK;
2350 int target_profiling_default(struct target *target, uint32_t *samples,
2351 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2353 struct timeval timeout, now;
2355 gettimeofday(&timeout, NULL);
2356 timeval_add_time(&timeout, seconds, 0);
2358 LOG_INFO("Starting profiling. Halting and resuming the"
2359 " target as often as we can...");
2361 uint32_t sample_count = 0;
2362 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2363 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2365 int retval = ERROR_OK;
2366 for (;;) {
2367 target_poll(target);
2368 if (target->state == TARGET_HALTED) {
2369 uint32_t t = buf_get_u32(reg->value, 0, 32);
2370 samples[sample_count++] = t;
2371 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2372 retval = target_resume(target, 1, 0, 0, 0);
2373 target_poll(target);
2374 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2375 } else if (target->state == TARGET_RUNNING) {
2376 /* We want to quickly sample the PC. */
2377 retval = target_halt(target);
2378 } else {
2379 LOG_INFO("Target not halted or running");
2380 retval = ERROR_OK;
2381 break;
2384 if (retval != ERROR_OK)
2385 break;
2387 gettimeofday(&now, NULL);
2388 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2389 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2390 break;
2394 *num_samples = sample_count;
2395 return retval;
2398 /* Single aligned words are guaranteed to use 16 or 32 bit access
2399 * mode respectively; otherwise the data is handled as quickly as
2400 * possible
2402 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2404 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2405 size, address);
2407 if (!target_was_examined(target)) {
2408 LOG_ERROR("Target not examined yet");
2409 return ERROR_FAIL;
2412 if (size == 0)
2413 return ERROR_OK;
2415 if ((address + size - 1) < address) {
2416 /* GDB can request this when e.g. PC is 0xfffffffc */
2417 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2418 address,
2419 size);
2420 return ERROR_FAIL;
2423 return target->type->write_buffer(target, address, size, buffer);
2426 static int target_write_buffer_default(struct target *target,
2427 target_addr_t address, uint32_t count, const uint8_t *buffer)
2429 uint32_t size;
2430 unsigned int data_bytes = target_data_bits(target) / 8;
2432 /* Step the access size up towards the maximum, aligning the address as we go.
2433 * The loop condition makes sure the next pass still has something to do with the size we leave to it. */
2434 for (size = 1;
2435 size < data_bytes && count >= size * 2 + (address & size);
2436 size *= 2) {
2437 if (address & size) {
2438 int retval = target_write_memory(target, address, size, 1, buffer);
2439 if (retval != ERROR_OK)
2440 return retval;
2441 address += size;
2442 count -= size;
2443 buffer += size;
2447 /* Write the data with as large an access size as possible. */
2448 for (; size > 0; size /= 2) {
2449 uint32_t aligned = count - count % size;
2450 if (aligned > 0) {
2451 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2452 if (retval != ERROR_OK)
2453 return retval;
2454 address += aligned;
2455 count -= aligned;
2456 buffer += aligned;
2460 return ERROR_OK;
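/*
 * Worked example for the helper above (illustrative, numbers are
 * hypothetical): on a target with 32-bit data accesses (data_bytes == 4),
 * writing count == 7 bytes to address 0x20000001 is issued as
 *   - one 1-byte access at 0x20000001 (aligns to a 2-byte boundary),
 *   - one 2-byte access at 0x20000002 (aligns to a 4-byte boundary),
 *   - one 4-byte access at 0x20000004,
 * leaving nothing for the smaller tail sizes.
 */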
2463 /* Single aligned words are guaranteed to use 16 or 32 bit access
2464 * mode respectively; otherwise the data is handled as quickly as
2465 * possible
2467 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2469 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2470 size, address);
2472 if (!target_was_examined(target)) {
2473 LOG_ERROR("Target not examined yet");
2474 return ERROR_FAIL;
2477 if (size == 0)
2478 return ERROR_OK;
2480 if ((address + size - 1) < address) {
2481 /* GDB can request this when e.g. PC is 0xfffffffc */
2482 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2483 address,
2484 size);
2485 return ERROR_FAIL;
2488 return target->type->read_buffer(target, address, size, buffer);
2491 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2493 uint32_t size;
2494 unsigned int data_bytes = target_data_bits(target) / 8;
2496 /* Step the access size up towards the maximum, aligning the address as we go.
2497 * The loop condition makes sure the next pass still has something to do with the size we leave to it. */
2498 for (size = 1;
2499 size < data_bytes && count >= size * 2 + (address & size);
2500 size *= 2) {
2501 if (address & size) {
2502 int retval = target_read_memory(target, address, size, 1, buffer);
2503 if (retval != ERROR_OK)
2504 return retval;
2505 address += size;
2506 count -= size;
2507 buffer += size;
2511 /* Read the data with as large an access size as possible. */
2512 for (; size > 0; size /= 2) {
2513 uint32_t aligned = count - count % size;
2514 if (aligned > 0) {
2515 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2516 if (retval != ERROR_OK)
2517 return retval;
2518 address += aligned;
2519 count -= aligned;
2520 buffer += aligned;
2524 return ERROR_OK;
2527 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2529 uint8_t *buffer;
2530 int retval;
2531 uint32_t i;
2532 uint32_t checksum = 0;
2533 if (!target_was_examined(target)) {
2534 LOG_ERROR("Target not examined yet");
2535 return ERROR_FAIL;
2537 if (!target->type->checksum_memory) {
2538 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2539 return ERROR_FAIL;
2542 retval = target->type->checksum_memory(target, address, size, &checksum);
2543 if (retval != ERROR_OK) {
2544 buffer = malloc(size);
2545 if (!buffer) {
2546 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2547 return ERROR_COMMAND_SYNTAX_ERROR;
2549 retval = target_read_buffer(target, address, size, buffer);
2550 if (retval != ERROR_OK) {
2551 free(buffer);
2552 return retval;
2555 /* convert to target endianness */
2556 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2557 uint32_t target_data;
2558 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2559 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2562 retval = image_calculate_checksum(buffer, size, &checksum);
2563 free(buffer);
2566 *crc = checksum;
2568 return retval;
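/*
 * Illustrative sketch, not part of the original file: comparing target memory
 * against a host-side buffer without reading it all back, as the verify_image
 * path below does.  The address and length are hypothetical.
 *
 *	uint32_t host_crc, target_crc;
 *	image_calculate_checksum(buffer, length, &host_crc);
 *	int retval = target_checksum_memory(target, 0x08000000, length, &target_crc);
 *	if (retval == ERROR_OK && host_crc != target_crc)
 *		LOG_ERROR("checksum mismatch");
 */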
2571 int target_blank_check_memory(struct target *target,
2572 struct target_memory_check_block *blocks, int num_blocks,
2573 uint8_t erased_value)
2575 if (!target_was_examined(target)) {
2576 LOG_ERROR("Target not examined yet");
2577 return ERROR_FAIL;
2580 if (!target->type->blank_check_memory)
2581 return ERROR_NOT_IMPLEMENTED;
2583 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2586 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2588 uint8_t value_buf[8];
2589 if (!target_was_examined(target)) {
2590 LOG_ERROR("Target not examined yet");
2591 return ERROR_FAIL;
2594 int retval = target_read_memory(target, address, 8, 1, value_buf);
2596 if (retval == ERROR_OK) {
2597 *value = target_buffer_get_u64(target, value_buf);
2598 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2599 address,
2600 *value);
2601 } else {
2602 *value = 0x0;
2603 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2604 address);
2607 return retval;
2610 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2612 uint8_t value_buf[4];
2613 if (!target_was_examined(target)) {
2614 LOG_ERROR("Target not examined yet");
2615 return ERROR_FAIL;
2618 int retval = target_read_memory(target, address, 4, 1, value_buf);
2620 if (retval == ERROR_OK) {
2621 *value = target_buffer_get_u32(target, value_buf);
2622 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2623 address,
2624 *value);
2625 } else {
2626 *value = 0x0;
2627 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2628 address);
2631 return retval;
2634 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2636 uint8_t value_buf[2];
2637 if (!target_was_examined(target)) {
2638 LOG_ERROR("Target not examined yet");
2639 return ERROR_FAIL;
2642 int retval = target_read_memory(target, address, 2, 1, value_buf);
2644 if (retval == ERROR_OK) {
2645 *value = target_buffer_get_u16(target, value_buf);
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2655 return retval;
2658 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2660 if (!target_was_examined(target)) {
2661 LOG_ERROR("Target not examined yet");
2662 return ERROR_FAIL;
2665 int retval = target_read_memory(target, address, 1, 1, value);
2667 if (retval == ERROR_OK) {
2668 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2669 address,
2670 *value);
2671 } else {
2672 *value = 0x0;
2673 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2674 address);
2677 return retval;
2680 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2682 int retval;
2683 uint8_t value_buf[8];
2684 if (!target_was_examined(target)) {
2685 LOG_ERROR("Target not examined yet");
2686 return ERROR_FAIL;
2689 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2690 address,
2691 value);
2693 target_buffer_set_u64(target, value_buf, value);
2694 retval = target_write_memory(target, address, 8, 1, value_buf);
2695 if (retval != ERROR_OK)
2696 LOG_DEBUG("failed: %i", retval);
2698 return retval;
2701 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2703 int retval;
2704 uint8_t value_buf[4];
2705 if (!target_was_examined(target)) {
2706 LOG_ERROR("Target not examined yet");
2707 return ERROR_FAIL;
2710 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2711 address,
2712 value);
2714 target_buffer_set_u32(target, value_buf, value);
2715 retval = target_write_memory(target, address, 4, 1, value_buf);
2716 if (retval != ERROR_OK)
2717 LOG_DEBUG("failed: %i", retval);
2719 return retval;
2722 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2724 int retval;
2725 uint8_t value_buf[2];
2726 if (!target_was_examined(target)) {
2727 LOG_ERROR("Target not examined yet");
2728 return ERROR_FAIL;
2731 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2732 address,
2733 value);
2735 target_buffer_set_u16(target, value_buf, value);
2736 retval = target_write_memory(target, address, 2, 1, value_buf);
2737 if (retval != ERROR_OK)
2738 LOG_DEBUG("failed: %i", retval);
2740 return retval;
2743 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2745 int retval;
2746 if (!target_was_examined(target)) {
2747 LOG_ERROR("Target not examined yet");
2748 return ERROR_FAIL;
2751 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2752 address, value);
2754 retval = target_write_memory(target, address, 1, 1, &value);
2755 if (retval != ERROR_OK)
2756 LOG_DEBUG("failed: %i", retval);
2758 return retval;
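/*
 * Illustrative sketch, not part of the original file: a typical
 * read-modify-write of a 32-bit memory-mapped register using the helpers
 * above.  The register address and bit position are hypothetical.
 *
 *	uint32_t ctrl;
 *	int retval = target_read_u32(target, 0x40021000, &ctrl);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	ctrl |= (1u << 0);
 *	return target_write_u32(target, 0x40021000, ctrl);
 */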
2761 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2763 int retval;
2764 uint8_t value_buf[8];
2765 if (!target_was_examined(target)) {
2766 LOG_ERROR("Target not examined yet");
2767 return ERROR_FAIL;
2770 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2771 address,
2772 value);
2774 target_buffer_set_u64(target, value_buf, value);
2775 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2776 if (retval != ERROR_OK)
2777 LOG_DEBUG("failed: %i", retval);
2779 return retval;
2782 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2784 int retval;
2785 uint8_t value_buf[4];
2786 if (!target_was_examined(target)) {
2787 LOG_ERROR("Target not examined yet");
2788 return ERROR_FAIL;
2791 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2792 address,
2793 value);
2795 target_buffer_set_u32(target, value_buf, value);
2796 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2797 if (retval != ERROR_OK)
2798 LOG_DEBUG("failed: %i", retval);
2800 return retval;
2803 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2805 int retval;
2806 uint8_t value_buf[2];
2807 if (!target_was_examined(target)) {
2808 LOG_ERROR("Target not examined yet");
2809 return ERROR_FAIL;
2812 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2813 address,
2814 value);
2816 target_buffer_set_u16(target, value_buf, value);
2817 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2818 if (retval != ERROR_OK)
2819 LOG_DEBUG("failed: %i", retval);
2821 return retval;
2824 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2826 int retval;
2827 if (!target_was_examined(target)) {
2828 LOG_ERROR("Target not examined yet");
2829 return ERROR_FAIL;
2832 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2833 address, value);
2835 retval = target_write_phys_memory(target, address, 1, 1, &value);
2836 if (retval != ERROR_OK)
2837 LOG_DEBUG("failed: %i", retval);
2839 return retval;
2842 static int find_target(struct command_invocation *cmd, const char *name)
2844 struct target *target = get_target(name);
2845 if (!target) {
2846 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2847 return ERROR_FAIL;
2849 if (!target->tap->enabled) {
2850 command_print(cmd, "Target: TAP %s is disabled, "
2851 "can't be the current target\n",
2852 target->tap->dotted_name);
2853 return ERROR_FAIL;
2856 cmd->ctx->current_target = target;
2857 if (cmd->ctx->current_target_override)
2858 cmd->ctx->current_target_override = target;
2860 return ERROR_OK;
2864 COMMAND_HANDLER(handle_targets_command)
2866 int retval = ERROR_OK;
2867 if (CMD_ARGC == 1) {
2868 retval = find_target(CMD, CMD_ARGV[0]);
2869 if (retval == ERROR_OK) {
2870 /* we're done! */
2871 return retval;
2875 struct target *target = all_targets;
2876 command_print(CMD, " TargetName Type Endian TapName State ");
2877 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2878 while (target) {
2879 const char *state;
2880 char marker = ' ';
2882 if (target->tap->enabled)
2883 state = target_state_name(target);
2884 else
2885 state = "tap-disabled";
2887 if (CMD_CTX->current_target == target)
2888 marker = '*';
2890 /* keep columns lined up to match the headers above */
2891 command_print(CMD,
2892 "%2d%c %-18s %-10s %-6s %-18s %s",
2893 target->target_number,
2894 marker,
2895 target_name(target),
2896 target_type_name(target),
2897 jim_nvp_value2name_simple(nvp_target_endian,
2898 target->endianness)->name,
2899 target->tap->dotted_name,
2900 state);
2901 target = target->next;
2904 return retval;
2907 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2909 static int power_dropout;
2910 static int srst_asserted;
2912 static int run_power_restore;
2913 static int run_power_dropout;
2914 static int run_srst_asserted;
2915 static int run_srst_deasserted;
2917 static int sense_handler(void)
2919 static int prev_srst_asserted;
2920 static int prev_power_dropout;
2922 int retval = jtag_power_dropout(&power_dropout);
2923 if (retval != ERROR_OK)
2924 return retval;
2926 int power_restored;
2927 power_restored = prev_power_dropout && !power_dropout;
2928 if (power_restored)
2929 run_power_restore = 1;
2931 int64_t current = timeval_ms();
2932 static int64_t last_power;
2933 bool wait_more = last_power + 2000 > current;
2934 if (power_dropout && !wait_more) {
2935 run_power_dropout = 1;
2936 last_power = current;
2939 retval = jtag_srst_asserted(&srst_asserted);
2940 if (retval != ERROR_OK)
2941 return retval;
2943 int srst_deasserted;
2944 srst_deasserted = prev_srst_asserted && !srst_asserted;
2946 static int64_t last_srst;
2947 wait_more = last_srst + 2000 > current;
2948 if (srst_deasserted && !wait_more) {
2949 run_srst_deasserted = 1;
2950 last_srst = current;
2953 if (!prev_srst_asserted && srst_asserted)
2954 run_srst_asserted = 1;
2956 prev_srst_asserted = srst_asserted;
2957 prev_power_dropout = power_dropout;
2959 if (srst_deasserted || power_restored) {
2960 /* Other than logging the event we can't do anything here.
2961 * Issuing a reset is a particularly bad idea as we might
2962 * be inside a reset already.
2966 return ERROR_OK;
2969 /* process target state changes */
2970 static int handle_target(void *priv)
2972 Jim_Interp *interp = (Jim_Interp *)priv;
2973 int retval = ERROR_OK;
2975 if (!is_jtag_poll_safe()) {
2976 /* polling is disabled currently */
2977 return ERROR_OK;
2980 /* we do not want to recurse here... */
2981 static int recursive;
2982 if (!recursive) {
2983 recursive = 1;
2984 sense_handler();
2985 /* danger! running these procedures can trigger srst assertions and power dropouts.
2986 * We need to avoid an infinite loop/recursion here and we do that by
2987 * clearing the flags after running these events.
2989 int did_something = 0;
2990 if (run_srst_asserted) {
2991 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2992 Jim_Eval(interp, "srst_asserted");
2993 did_something = 1;
2995 if (run_srst_deasserted) {
2996 Jim_Eval(interp, "srst_deasserted");
2997 did_something = 1;
2999 if (run_power_dropout) {
3000 LOG_INFO("Power dropout detected, running power_dropout proc.");
3001 Jim_Eval(interp, "power_dropout");
3002 did_something = 1;
3004 if (run_power_restore) {
3005 Jim_Eval(interp, "power_restore");
3006 did_something = 1;
3009 if (did_something) {
3010 /* clear detect flags */
3011 sense_handler();
3014 /* clear action flags */
3016 run_srst_asserted = 0;
3017 run_srst_deasserted = 0;
3018 run_power_restore = 0;
3019 run_power_dropout = 0;
3021 recursive = 0;
3024 /* Poll targets for state changes unless that's globally disabled.
3025 * Skip targets that are currently disabled.
3027 for (struct target *target = all_targets;
3028 is_jtag_poll_safe() && target;
3029 target = target->next) {
3031 if (!target_was_examined(target))
3032 continue;
3034 if (!target->tap->enabled)
3035 continue;
3037 if (target->backoff.times > target->backoff.count) {
3038 /* do not poll this time as we failed previously */
3039 target->backoff.count++;
3040 continue;
3042 target->backoff.count = 0;
3044 /* only poll target if we've got power and srst isn't asserted */
3045 if (!power_dropout && !srst_asserted) {
3046 /* polling may fail silently until the target has been examined */
3047 retval = target_poll(target);
3048 if (retval != ERROR_OK) {
3049 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3050 if (target->backoff.times * polling_interval < 5000) {
3051 target->backoff.times *= 2;
3052 target->backoff.times++;
3055 /* Report the halt to GDB. This allows the user to
3056 * run monitor commands to handle the situation.
3058 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3060 if (target->backoff.times > 0) {
3061 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3062 target_reset_examined(target);
3063 retval = target_examine_one(target);
3064 /* Target examination could have failed due to an unstable connection,
3065 * but we set the examined flag anyway so it gets re-polled later */
3066 if (retval != ERROR_OK) {
3067 target_set_examined(target);
3068 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3069 target->backoff.times * polling_interval);
3070 return retval;
3074 /* Since we succeeded, we reset backoff count */
3075 target->backoff.times = 0;
3079 return retval;
3082 COMMAND_HANDLER(handle_reg_command)
3084 LOG_DEBUG("-");
3086 struct target *target = get_current_target(CMD_CTX);
3087 struct reg *reg = NULL;
3089 /* list all available registers for the current target */
3090 if (CMD_ARGC == 0) {
3091 struct reg_cache *cache = target->reg_cache;
3093 unsigned int count = 0;
3094 while (cache) {
3095 unsigned i;
3097 command_print(CMD, "===== %s", cache->name);
3099 for (i = 0, reg = cache->reg_list;
3100 i < cache->num_regs;
3101 i++, reg++, count++) {
3102 if (reg->exist == false || reg->hidden)
3103 continue;
3104 /* only print cached values if they are valid */
3105 if (reg->valid) {
3106 char *value = buf_to_hex_str(reg->value,
3107 reg->size);
3108 command_print(CMD,
3109 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3110 count, reg->name,
3111 reg->size, value,
3112 reg->dirty
3113 ? " (dirty)"
3114 : "");
3115 free(value);
3116 } else {
3117 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3118 count, reg->name,
3119 reg->size);
3122 cache = cache->next;
3125 return ERROR_OK;
3128 /* access a single register by its ordinal number */
3129 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3130 unsigned num;
3131 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3133 struct reg_cache *cache = target->reg_cache;
3134 unsigned int count = 0;
3135 while (cache) {
3136 unsigned i;
3137 for (i = 0; i < cache->num_regs; i++) {
3138 if (count++ == num) {
3139 reg = &cache->reg_list[i];
3140 break;
3143 if (reg)
3144 break;
3145 cache = cache->next;
3148 if (!reg) {
3149 command_print(CMD, "%i is out of bounds, the current target "
3150 "has only %i registers (0 - %i)", num, count, count - 1);
3151 return ERROR_OK;
3153 } else {
3154 /* access a single register by its name */
3155 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3157 if (!reg)
3158 goto not_found;
3161 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3163 if (!reg->exist)
3164 goto not_found;
3166 /* display a register */
3167 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3168 && (CMD_ARGV[1][0] <= '9')))) {
3169 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3170 reg->valid = 0;
3172 if (reg->valid == 0) {
3173 int retval = reg->type->get(reg);
3174 if (retval != ERROR_OK) {
3175 LOG_ERROR("Could not read register '%s'", reg->name);
3176 return retval;
3179 char *value = buf_to_hex_str(reg->value, reg->size);
3180 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3181 free(value);
3182 return ERROR_OK;
3185 /* set register value */
3186 if (CMD_ARGC == 2) {
3187 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3188 if (!buf)
3189 return ERROR_FAIL;
3190 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3192 int retval = reg->type->set(reg, buf);
3193 if (retval != ERROR_OK) {
3194 LOG_ERROR("Could not write to register '%s'", reg->name);
3195 } else {
3196 char *value = buf_to_hex_str(reg->value, reg->size);
3197 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3198 free(value);
3201 free(buf);
3203 return retval;
3206 return ERROR_COMMAND_SYNTAX_ERROR;
3208 not_found:
3209 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3210 return ERROR_OK;
3213 COMMAND_HANDLER(handle_poll_command)
3215 int retval = ERROR_OK;
3216 struct target *target = get_current_target(CMD_CTX);
3218 if (CMD_ARGC == 0) {
3219 command_print(CMD, "background polling: %s",
3220 jtag_poll_get_enabled() ? "on" : "off");
3221 command_print(CMD, "TAP: %s (%s)",
3222 target->tap->dotted_name,
3223 target->tap->enabled ? "enabled" : "disabled");
3224 if (!target->tap->enabled)
3225 return ERROR_OK;
3226 retval = target_poll(target);
3227 if (retval != ERROR_OK)
3228 return retval;
3229 retval = target_arch_state(target);
3230 if (retval != ERROR_OK)
3231 return retval;
3232 } else if (CMD_ARGC == 1) {
3233 bool enable;
3234 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3235 jtag_poll_set_enabled(enable);
3236 } else
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3239 return retval;
3242 COMMAND_HANDLER(handle_wait_halt_command)
3244 if (CMD_ARGC > 1)
3245 return ERROR_COMMAND_SYNTAX_ERROR;
3247 unsigned ms = DEFAULT_HALT_TIMEOUT;
3248 if (1 == CMD_ARGC) {
3249 int retval = parse_uint(CMD_ARGV[0], &ms);
3250 if (retval != ERROR_OK)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3254 struct target *target = get_current_target(CMD_CTX);
3255 return target_wait_state(target, TARGET_HALTED, ms);
3258 /* wait for target state to change. The trick here is to have a low
3259 * latency for short waits and not to suck up all the CPU time
3260 * on longer waits.
3262 * After 500ms, keep_alive() is invoked
3264 int target_wait_state(struct target *target, enum target_state state, int ms)
3266 int retval;
3267 int64_t then = 0, cur;
3268 bool once = true;
3270 for (;;) {
3271 retval = target_poll(target);
3272 if (retval != ERROR_OK)
3273 return retval;
3274 if (target->state == state)
3275 break;
3276 cur = timeval_ms();
3277 if (once) {
3278 once = false;
3279 then = timeval_ms();
3280 LOG_DEBUG("waiting for target %s...",
3281 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3284 if (cur-then > 500)
3285 keep_alive();
3287 if ((cur-then) > ms) {
3288 LOG_ERROR("timed out while waiting for target %s",
3289 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3290 return ERROR_FAIL;
3294 return ERROR_OK;
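/*
 * Illustrative sketch, not part of the original file: the common
 * halt-and-wait idiom built on the helper above, mirroring what the "halt"
 * command handler below does.  The 500 ms timeout is hypothetical.
 *
 *	int retval = target_halt(target);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	return target_wait_state(target, TARGET_HALTED, 500);
 */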
3297 COMMAND_HANDLER(handle_halt_command)
3299 LOG_DEBUG("-");
3301 struct target *target = get_current_target(CMD_CTX);
3303 target->verbose_halt_msg = true;
3305 int retval = target_halt(target);
3306 if (retval != ERROR_OK)
3307 return retval;
3309 if (CMD_ARGC == 1) {
3310 unsigned wait_local;
3311 retval = parse_uint(CMD_ARGV[0], &wait_local);
3312 if (retval != ERROR_OK)
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314 if (!wait_local)
3315 return ERROR_OK;
3318 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3321 COMMAND_HANDLER(handle_soft_reset_halt_command)
3323 struct target *target = get_current_target(CMD_CTX);
3325 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3327 target_soft_reset_halt(target);
3329 return ERROR_OK;
3332 COMMAND_HANDLER(handle_reset_command)
3334 if (CMD_ARGC > 1)
3335 return ERROR_COMMAND_SYNTAX_ERROR;
3337 enum target_reset_mode reset_mode = RESET_RUN;
3338 if (CMD_ARGC == 1) {
3339 const struct jim_nvp *n;
3340 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3341 if ((!n->name) || (n->value == RESET_UNKNOWN))
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343 reset_mode = n->value;
3346 /* reset *all* targets */
3347 return target_process_reset(CMD, reset_mode);
3351 COMMAND_HANDLER(handle_resume_command)
3353 int current = 1;
3354 if (CMD_ARGC > 1)
3355 return ERROR_COMMAND_SYNTAX_ERROR;
3357 struct target *target = get_current_target(CMD_CTX);
3359 /* with no CMD_ARGV, resume from current pc, addr = 0,
3360 * with one argument, addr = CMD_ARGV[0],
3361 * handle breakpoints, not debugging */
3362 target_addr_t addr = 0;
3363 if (CMD_ARGC == 1) {
3364 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3365 current = 0;
3368 return target_resume(target, current, addr, 1, 0);
3371 COMMAND_HANDLER(handle_step_command)
3373 if (CMD_ARGC > 1)
3374 return ERROR_COMMAND_SYNTAX_ERROR;
3376 LOG_DEBUG("-");
3378 /* with no CMD_ARGV, step from current pc, addr = 0,
3379 * with one argument addr = CMD_ARGV[0],
3380 * handle breakpoints, debugging */
3381 target_addr_t addr = 0;
3382 int current_pc = 1;
3383 if (CMD_ARGC == 1) {
3384 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3385 current_pc = 0;
3388 struct target *target = get_current_target(CMD_CTX);
3390 return target_step(target, current_pc, addr, 1);
3393 void target_handle_md_output(struct command_invocation *cmd,
3394 struct target *target, target_addr_t address, unsigned size,
3395 unsigned count, const uint8_t *buffer)
3397 const unsigned line_bytecnt = 32;
3398 unsigned line_modulo = line_bytecnt / size;
3400 char output[line_bytecnt * 4 + 1];
3401 unsigned output_len = 0;
3403 const char *value_fmt;
3404 switch (size) {
3405 case 8:
3406 value_fmt = "%16.16"PRIx64" ";
3407 break;
3408 case 4:
3409 value_fmt = "%8.8"PRIx64" ";
3410 break;
3411 case 2:
3412 value_fmt = "%4.4"PRIx64" ";
3413 break;
3414 case 1:
3415 value_fmt = "%2.2"PRIx64" ";
3416 break;
3417 default:
3418 /* "can't happen", caller checked */
3419 LOG_ERROR("invalid memory read size: %u", size);
3420 return;
3423 for (unsigned i = 0; i < count; i++) {
3424 if (i % line_modulo == 0) {
3425 output_len += snprintf(output + output_len,
3426 sizeof(output) - output_len,
3427 TARGET_ADDR_FMT ": ",
3428 (address + (i * size)));
3431 uint64_t value = 0;
3432 const uint8_t *value_ptr = buffer + i * size;
3433 switch (size) {
3434 case 8:
3435 value = target_buffer_get_u64(target, value_ptr);
3436 break;
3437 case 4:
3438 value = target_buffer_get_u32(target, value_ptr);
3439 break;
3440 case 2:
3441 value = target_buffer_get_u16(target, value_ptr);
3442 break;
3443 case 1:
3444 value = *value_ptr;
3446 output_len += snprintf(output + output_len,
3447 sizeof(output) - output_len,
3448 value_fmt, value);
3450 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3451 command_print(cmd, "%s", output);
3452 output_len = 0;
3457 COMMAND_HANDLER(handle_md_command)
3459 if (CMD_ARGC < 1)
3460 return ERROR_COMMAND_SYNTAX_ERROR;
3462 unsigned size = 0;
3463 switch (CMD_NAME[2]) {
3464 case 'd':
3465 size = 8;
3466 break;
3467 case 'w':
3468 size = 4;
3469 break;
3470 case 'h':
3471 size = 2;
3472 break;
3473 case 'b':
3474 size = 1;
3475 break;
3476 default:
3477 return ERROR_COMMAND_SYNTAX_ERROR;
3480 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3481 int (*fn)(struct target *target,
3482 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3483 if (physical) {
3484 CMD_ARGC--;
3485 CMD_ARGV++;
3486 fn = target_read_phys_memory;
3487 } else
3488 fn = target_read_memory;
3489 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3490 return ERROR_COMMAND_SYNTAX_ERROR;
3492 target_addr_t address;
3493 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3495 unsigned count = 1;
3496 if (CMD_ARGC == 2)
3497 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3499 uint8_t *buffer = calloc(count, size);
3500 if (!buffer) {
3501 LOG_ERROR("Failed to allocate md read buffer");
3502 return ERROR_FAIL;
3505 struct target *target = get_current_target(CMD_CTX);
3506 int retval = fn(target, address, size, count, buffer);
3507 if (retval == ERROR_OK)
3508 target_handle_md_output(CMD, target, address, size, count, buffer);
3510 free(buffer);
3512 return retval;
3515 typedef int (*target_write_fn)(struct target *target,
3516 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3518 static int target_fill_mem(struct target *target,
3519 target_addr_t address,
3520 target_write_fn fn,
3521 unsigned data_size,
3522 /* value */
3523 uint64_t b,
3524 /* count */
3525 unsigned c)
3527 /* We have to write in reasonably large chunks to be able
3528 * to fill large memory areas with any sane speed */
3529 const unsigned chunk_size = 16384;
3530 uint8_t *target_buf = malloc(chunk_size * data_size);
3531 if (!target_buf) {
3532 LOG_ERROR("Out of memory");
3533 return ERROR_FAIL;
3536 for (unsigned i = 0; i < chunk_size; i++) {
3537 switch (data_size) {
3538 case 8:
3539 target_buffer_set_u64(target, target_buf + i * data_size, b);
3540 break;
3541 case 4:
3542 target_buffer_set_u32(target, target_buf + i * data_size, b);
3543 break;
3544 case 2:
3545 target_buffer_set_u16(target, target_buf + i * data_size, b);
3546 break;
3547 case 1:
3548 target_buffer_set_u8(target, target_buf + i * data_size, b);
3549 break;
3550 default:
3551 exit(-1);
3555 int retval = ERROR_OK;
3557 for (unsigned x = 0; x < c; x += chunk_size) {
3558 unsigned current;
3559 current = c - x;
3560 if (current > chunk_size)
3561 current = chunk_size;
3562 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3563 if (retval != ERROR_OK)
3564 break;
3565 /* avoid GDB timeouts */
3566 keep_alive();
3568 free(target_buf);
3570 return retval;
3574 COMMAND_HANDLER(handle_mw_command)
3576 if (CMD_ARGC < 2)
3577 return ERROR_COMMAND_SYNTAX_ERROR;
3578 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3579 target_write_fn fn;
3580 if (physical) {
3581 CMD_ARGC--;
3582 CMD_ARGV++;
3583 fn = target_write_phys_memory;
3584 } else
3585 fn = target_write_memory;
3586 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3587 return ERROR_COMMAND_SYNTAX_ERROR;
3589 target_addr_t address;
3590 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3592 uint64_t value;
3593 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3595 unsigned count = 1;
3596 if (CMD_ARGC == 3)
3597 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3599 struct target *target = get_current_target(CMD_CTX);
3600 unsigned wordsize;
3601 switch (CMD_NAME[2]) {
3602 case 'd':
3603 wordsize = 8;
3604 break;
3605 case 'w':
3606 wordsize = 4;
3607 break;
3608 case 'h':
3609 wordsize = 2;
3610 break;
3611 case 'b':
3612 wordsize = 1;
3613 break;
3614 default:
3615 return ERROR_COMMAND_SYNTAX_ERROR;
3618 return target_fill_mem(target, address, fn, wordsize, value, count);
3621 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3622 target_addr_t *min_address, target_addr_t *max_address)
3624 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3625 return ERROR_COMMAND_SYNTAX_ERROR;
3627 /* a base address isn't always necessary,
3628 * default to 0x0 (i.e. don't relocate) */
3629 if (CMD_ARGC >= 2) {
3630 target_addr_t addr;
3631 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3632 image->base_address = addr;
3633 image->base_address_set = true;
3634 } else
3635 image->base_address_set = false;
3637 image->start_address_set = false;
3639 if (CMD_ARGC >= 4)
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3641 if (CMD_ARGC == 5) {
3642 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3643 /* use size (given) to find max (required) */
3644 *max_address += *min_address;
3647 if (*min_address > *max_address)
3648 return ERROR_COMMAND_SYNTAX_ERROR;
3650 return ERROR_OK;
3653 COMMAND_HANDLER(handle_load_image_command)
3655 uint8_t *buffer;
3656 size_t buf_cnt;
3657 uint32_t image_size;
3658 target_addr_t min_address = 0;
3659 target_addr_t max_address = -1;
3660 struct image image;
3662 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3663 &image, &min_address, &max_address);
3664 if (retval != ERROR_OK)
3665 return retval;
3667 struct target *target = get_current_target(CMD_CTX);
3669 struct duration bench;
3670 duration_start(&bench);
3672 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3673 return ERROR_FAIL;
3675 image_size = 0x0;
3676 retval = ERROR_OK;
3677 for (unsigned int i = 0; i < image.num_sections; i++) {
3678 buffer = malloc(image.sections[i].size);
3679 if (!buffer) {
3680 command_print(CMD,
3681 "error allocating buffer for section (%d bytes)",
3682 (int)(image.sections[i].size));
3683 retval = ERROR_FAIL;
3684 break;
3687 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3688 if (retval != ERROR_OK) {
3689 free(buffer);
3690 break;
3693 uint32_t offset = 0;
3694 uint32_t length = buf_cnt;
3696 /* DANGER!!! beware of unsigned comparison here!!! */
3698 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3699 (image.sections[i].base_address < max_address)) {
3701 if (image.sections[i].base_address < min_address) {
3702 /* clip addresses below */
3703 offset += min_address-image.sections[i].base_address;
3704 length -= offset;
3707 if (image.sections[i].base_address + buf_cnt > max_address)
3708 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3710 retval = target_write_buffer(target,
3711 image.sections[i].base_address + offset, length, buffer + offset);
3712 if (retval != ERROR_OK) {
3713 free(buffer);
3714 break;
3716 image_size += length;
3717 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3718 (unsigned int)length,
3719 image.sections[i].base_address + offset);
3722 free(buffer);
3725 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3726 command_print(CMD, "downloaded %" PRIu32 " bytes "
3727 "in %fs (%0.3f KiB/s)", image_size,
3728 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3731 image_close(&image);
3733 return retval;
3737 COMMAND_HANDLER(handle_dump_image_command)
3739 struct fileio *fileio;
3740 uint8_t *buffer;
3741 int retval, retvaltemp;
3742 target_addr_t address, size;
3743 struct duration bench;
3744 struct target *target = get_current_target(CMD_CTX);
3746 if (CMD_ARGC != 3)
3747 return ERROR_COMMAND_SYNTAX_ERROR;
3749 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3750 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3752 uint32_t buf_size = (size > 4096) ? 4096 : size;
3753 buffer = malloc(buf_size);
3754 if (!buffer)
3755 return ERROR_FAIL;
3757 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3758 if (retval != ERROR_OK) {
3759 free(buffer);
3760 return retval;
3763 duration_start(&bench);
3765 while (size > 0) {
3766 size_t size_written;
3767 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3768 retval = target_read_buffer(target, address, this_run_size, buffer);
3769 if (retval != ERROR_OK)
3770 break;
3772 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3773 if (retval != ERROR_OK)
3774 break;
3776 size -= this_run_size;
3777 address += this_run_size;
3780 free(buffer);
3782 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3783 size_t filesize;
3784 retval = fileio_size(fileio, &filesize);
3785 if (retval != ERROR_OK)
3786 return retval;
3787 command_print(CMD,
3788 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3789 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3792 retvaltemp = fileio_close(fileio);
3793 if (retvaltemp != ERROR_OK)
3794 return retvaltemp;
3796 return retval;
3799 enum verify_mode {
3800 IMAGE_TEST = 0,
3801 IMAGE_VERIFY = 1,
3802 IMAGE_CHECKSUM_ONLY = 2
3805 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3807 uint8_t *buffer;
3808 size_t buf_cnt;
3809 uint32_t image_size;
3810 int retval;
3811 uint32_t checksum = 0;
3812 uint32_t mem_checksum = 0;
3814 struct image image;
3816 struct target *target = get_current_target(CMD_CTX);
3818 if (CMD_ARGC < 1)
3819 return ERROR_COMMAND_SYNTAX_ERROR;
3821 if (!target) {
3822 LOG_ERROR("no target selected");
3823 return ERROR_FAIL;
3826 struct duration bench;
3827 duration_start(&bench);
3829 if (CMD_ARGC >= 2) {
3830 target_addr_t addr;
3831 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3832 image.base_address = addr;
3833 image.base_address_set = true;
3834 } else {
3835 image.base_address_set = false;
3836 image.base_address = 0x0;
3839 image.start_address_set = false;
3841 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3842 if (retval != ERROR_OK)
3843 return retval;
3845 image_size = 0x0;
3846 int diffs = 0;
3847 retval = ERROR_OK;
3848 for (unsigned int i = 0; i < image.num_sections; i++) {
3849 buffer = malloc(image.sections[i].size);
3850 if (!buffer) {
3851 command_print(CMD,
3852 "error allocating buffer for section (%" PRIu32 " bytes)",
3853 image.sections[i].size);
3854 break;
3856 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3857 if (retval != ERROR_OK) {
3858 free(buffer);
3859 break;
3862 if (verify >= IMAGE_VERIFY) {
3863 /* calculate checksum of image */
3864 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3865 if (retval != ERROR_OK) {
3866 free(buffer);
3867 break;
3870 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3875 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3876 LOG_ERROR("checksum mismatch");
3877 free(buffer);
3878 retval = ERROR_FAIL;
3879 goto done;
3881 if (checksum != mem_checksum) {
3882 /* failed crc checksum, fall back to a binary compare */
3883 uint8_t *data;
3885 if (diffs == 0)
3886 LOG_ERROR("checksum mismatch - attempting binary compare");
3888 data = malloc(buf_cnt);
3890 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3891 if (retval == ERROR_OK) {
3892 uint32_t t;
3893 for (t = 0; t < buf_cnt; t++) {
3894 if (data[t] != buffer[t]) {
3895 command_print(CMD,
3896 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3897 diffs,
3898 (unsigned)(t + image.sections[i].base_address),
3899 data[t],
3900 buffer[t]);
3901 if (diffs++ >= 127) {
3902 command_print(CMD, "More than 128 errors, the rest are not printed.");
3903 free(data);
3904 free(buffer);
3905 goto done;
3908 keep_alive();
3911 free(data);
3913 } else {
3914 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3915 image.sections[i].base_address,
3916 buf_cnt);
3919 free(buffer);
3920 image_size += buf_cnt;
3922 if (diffs > 0)
3923 command_print(CMD, "No more differences found.");
3924 done:
3925 if (diffs > 0)
3926 retval = ERROR_FAIL;
3927 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3928 command_print(CMD, "verified %" PRIu32 " bytes "
3929 "in %fs (%0.3f KiB/s)", image_size,
3930 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3933 image_close(&image);
3935 return retval;
3938 COMMAND_HANDLER(handle_verify_image_checksum_command)
3940 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3943 COMMAND_HANDLER(handle_verify_image_command)
3945 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3948 COMMAND_HANDLER(handle_test_image_command)
3950 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3953 static int handle_bp_command_list(struct command_invocation *cmd)
3955 struct target *target = get_current_target(cmd->ctx);
3956 struct breakpoint *breakpoint = target->breakpoints;
3957 while (breakpoint) {
3958 if (breakpoint->type == BKPT_SOFT) {
3959 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3960 breakpoint->length);
3961 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3962 breakpoint->address,
3963 breakpoint->length,
3964 buf);
3965 free(buf);
3966 } else {
3967 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3968 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3969 breakpoint->asid,
3970 breakpoint->length, breakpoint->number);
3971 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3972 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3973 breakpoint->address,
3974 breakpoint->length, breakpoint->number);
3975 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3976 breakpoint->asid);
3977 } else
3978 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3979 breakpoint->address,
3980 breakpoint->length, breakpoint->number);
3983 breakpoint = breakpoint->next;
3985 return ERROR_OK;
3988 static int handle_bp_command_set(struct command_invocation *cmd,
3989 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3991 struct target *target = get_current_target(cmd->ctx);
3992 int retval;
3994 if (asid == 0) {
3995 retval = breakpoint_add(target, addr, length, hw);
3996 /* error is always logged in breakpoint_add(), do not print it again */
3997 if (retval == ERROR_OK)
3998 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4000 } else if (addr == 0) {
4001 if (!target->type->add_context_breakpoint) {
4002 LOG_ERROR("Context breakpoint not available");
4003 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4005 retval = context_breakpoint_add(target, asid, length, hw);
4006 /* error is always logged in context_breakpoint_add(), do not print it again */
4007 if (retval == ERROR_OK)
4008 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4010 } else {
4011 if (!target->type->add_hybrid_breakpoint) {
4012 LOG_ERROR("Hybrid breakpoint not available");
4013 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4015 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4016 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4017 if (retval == ERROR_OK)
4018 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4020 return retval;
4023 COMMAND_HANDLER(handle_bp_command)
4025 target_addr_t addr;
4026 uint32_t asid;
4027 uint32_t length;
4028 int hw = BKPT_SOFT;
4030 switch (CMD_ARGC) {
4031 case 0:
4032 return handle_bp_command_list(CMD);
4034 case 2:
4035 asid = 0;
4036 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4038 return handle_bp_command_set(CMD, addr, asid, length, hw);
4040 case 3:
4041 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4042 hw = BKPT_HARD;
4043 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4044 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4045 asid = 0;
4046 return handle_bp_command_set(CMD, addr, asid, length, hw);
4047 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4048 hw = BKPT_HARD;
4049 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4050 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4051 addr = 0;
4052 return handle_bp_command_set(CMD, addr, asid, length, hw);
4054 /* fallthrough */
4055 case 4:
4056 hw = BKPT_HARD;
4057 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4060 return handle_bp_command_set(CMD, addr, asid, length, hw);
4062 default:
4063 return ERROR_COMMAND_SYNTAX_ERROR;
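/* Illustrative invocations matching the argument forms parsed above
 * (assuming the usual registration of this handler as 'bp'):
 *   bp                        ;# list all breakpoints
 *   bp 0x00001000 2           ;# 2-byte software breakpoint
 *   bp 0x00001000 4 hw        ;# hardware breakpoint
 *   bp 0x12 4 hw_ctx          ;# context breakpoint on ASID 0x12
 *   bp 0x00001000 0x12 4      ;# hybrid (address + ASID) hardware breakpoint
 */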
4067 COMMAND_HANDLER(handle_rbp_command)
4069 if (CMD_ARGC != 1)
4070 return ERROR_COMMAND_SYNTAX_ERROR;
4072 struct target *target = get_current_target(CMD_CTX);
4074 if (!strcmp(CMD_ARGV[0], "all")) {
4075 breakpoint_remove_all(target);
4076 } else {
4077 target_addr_t addr;
4078 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4080 breakpoint_remove(target, addr);
4083 return ERROR_OK;
4086 COMMAND_HANDLER(handle_wp_command)
4088 struct target *target = get_current_target(CMD_CTX);
4090 if (CMD_ARGC == 0) {
4091 struct watchpoint *watchpoint = target->watchpoints;
4093 while (watchpoint) {
4094 command_print(CMD, "address: " TARGET_ADDR_FMT
4095 ", len: 0x%8.8" PRIx32
4096 ", r/w/a: %i, value: 0x%8.8" PRIx32
4097 ", mask: 0x%8.8" PRIx32,
4098 watchpoint->address,
4099 watchpoint->length,
4100 (int)watchpoint->rw,
4101 watchpoint->value,
4102 watchpoint->mask);
4103 watchpoint = watchpoint->next;
4105 return ERROR_OK;
4108 enum watchpoint_rw type = WPT_ACCESS;
4109 target_addr_t addr = 0;
4110 uint32_t length = 0;
4111 uint32_t data_value = 0x0;
4112 uint32_t data_mask = 0xffffffff;
4114 switch (CMD_ARGC) {
4115 case 5:
4116 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4117 /* fall through */
4118 case 4:
4119 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4120 /* fall through */
4121 case 3:
4122 switch (CMD_ARGV[2][0]) {
4123 case 'r':
4124 type = WPT_READ;
4125 break;
4126 case 'w':
4127 type = WPT_WRITE;
4128 break;
4129 case 'a':
4130 type = WPT_ACCESS;
4131 break;
4132 default:
4133 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4134 return ERROR_COMMAND_SYNTAX_ERROR;
4136 /* fall through */
4137 case 2:
4138 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4139 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4140 break;
4142 default:
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4146 int retval = watchpoint_add(target, addr, length, type,
4147 data_value, data_mask);
4148 if (retval != ERROR_OK)
4149 LOG_ERROR("Failed to set watchpoint");
4151 return retval;
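/* Illustrative invocations matching the fall-through parsing above
 * (assuming the usual registration of this handler as 'wp'):
 *   wp                          ;# list watchpoints
 *   wp 0x20000000 4             ;# 4-byte access watchpoint
 *   wp 0x20000000 4 w           ;# write-only watchpoint
 *   wp 0x20000000 4 r 0x55 0xff ;# read watchpoint matching value 0x55 under mask 0xff
 */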
4154 COMMAND_HANDLER(handle_rwp_command)
4156 if (CMD_ARGC != 1)
4157 return ERROR_COMMAND_SYNTAX_ERROR;
4159 target_addr_t addr;
4160 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4162 struct target *target = get_current_target(CMD_CTX);
4163 watchpoint_remove(target, addr);
4165 return ERROR_OK;
4169 * Translate a virtual address to a physical address.
4171 * The low-level target implementation must have logged a detailed error
4172 * which is forwarded to the telnet/GDB session.
4174 COMMAND_HANDLER(handle_virt2phys_command)
4176 if (CMD_ARGC != 1)
4177 return ERROR_COMMAND_SYNTAX_ERROR;
4179 target_addr_t va;
4180 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4181 target_addr_t pa;
4183 struct target *target = get_current_target(CMD_CTX);
4184 int retval = target->type->virt2phys(target, va, &pa);
4185 if (retval == ERROR_OK)
4186 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4188 return retval;
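/* Illustrative use, assuming the usual registration as 'virt2phys':
 *   virt2phys 0xc0001000
 * prints the physical address the target's MMU currently maps that virtual
 * address to, for target types that implement virt2phys(). */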
4191 static void write_data(FILE *f, const void *data, size_t len)
4193 size_t written = fwrite(data, 1, len, f);
4194 if (written != len)
4195 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4198 static void write_long(FILE *f, int l, struct target *target)
4200 uint8_t val[4];
4202 target_buffer_set_u32(target, val, l);
4203 write_data(f, val, 4);
4206 static void write_string(FILE *f, char *s)
4208 write_data(f, s, strlen(s));
4211 typedef unsigned char UNIT[2]; /* unit of profiling */
4213 /* Dump a gmon.out histogram file. */
4214 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4215 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4217 uint32_t i;
4218 FILE *f = fopen(filename, "w");
4219 if (!f)
4220 return;
4221 write_string(f, "gmon");
4222 write_long(f, 0x00000001, target); /* Version */
4223 write_long(f, 0, target); /* padding */
4224 write_long(f, 0, target); /* padding */
4225 write_long(f, 0, target); /* padding */
4227 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4228 write_data(f, &zero, 1);
4230 /* figure out bucket size */
4231 uint32_t min;
4232 uint32_t max;
4233 if (with_range) {
4234 min = start_address;
4235 max = end_address;
4236 } else {
4237 min = samples[0];
4238 max = samples[0];
4239 for (i = 0; i < sample_num; i++) {
4240 if (min > samples[i])
4241 min = samples[i];
4242 if (max < samples[i])
4243 max = samples[i];
4246 /* max should be (largest sample + 1)
4247 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4248 if (max < UINT32_MAX)
4249 max++;
4251 /* gprof requires (max - min) >= 2 */
4252 while ((max - min) < 2) {
4253 if (max < UINT32_MAX)
4254 max++;
4255 else
4256 min--;
4260 uint32_t address_space = max - min;
4262 /* FIXME: What is a reasonable number of buckets?
4263 * The profiling result will be more accurate if there are enough buckets. */
4264 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4265 uint32_t num_buckets = address_space / sizeof(UNIT);
4266 if (num_buckets > max_buckets)
4267 num_buckets = max_buckets;
4268 int *buckets = malloc(sizeof(int) * num_buckets);
4269 if (!buckets) {
4270 fclose(f);
4271 return;
4273 memset(buckets, 0, sizeof(int) * num_buckets);
4274 for (i = 0; i < sample_num; i++) {
4275 uint32_t address = samples[i];
4277 if ((address < min) || (max <= address))
4278 continue;
4280 long long a = address - min;
4281 long long b = num_buckets;
4282 long long c = address_space;
4283 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4284 buckets[index_t]++;
4287 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4288 write_long(f, min, target); /* low_pc */
4289 write_long(f, max, target); /* high_pc */
4290 write_long(f, num_buckets, target); /* # of buckets */
4291 float sample_rate = sample_num / (duration_ms / 1000.0);
4292 write_long(f, sample_rate, target);
4293 write_string(f, "seconds");
4294 for (i = 0; i < (15-strlen("seconds")); i++)
4295 write_data(f, &zero, 1);
4296 write_string(f, "s");
4298 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300 char *data = malloc(2 * num_buckets);
4301 if (data) {
4302 for (i = 0; i < num_buckets; i++) {
4303 int val;
4304 val = buckets[i];
4305 if (val > 65535)
4306 val = 65535;
4307 data[i * 2] = val&0xff;
4308 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 free(buckets);
4311 write_data(f, data, num_buckets * 2);
4312 free(data);
4313 } else
4314 free(buckets);
4316 fclose(f);
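/* Worked example of the bucket scaling above, with illustrative numbers:
 * min = 0x08000000 and max = 0x08020000 give an address space of 0x20000
 * bytes and 0x20000 / sizeof(UNIT) = 65536 buckets, below the 128 * 1024
 * cap.  A sample at 0x08010000 then lands in bucket
 * (0x10000 * 65536) / 0x20000 = 32768, and each bucket is emitted as a
 * 16-bit count clamped to 65535. */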
4319 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
4320 * which is then used as a statistical sample of the PC. */
4321 COMMAND_HANDLER(handle_profile_command)
4323 struct target *target = get_current_target(CMD_CTX);
4325 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4326 return ERROR_COMMAND_SYNTAX_ERROR;
4328 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4329 uint32_t offset;
4330 uint32_t num_of_samples;
4331 int retval = ERROR_OK;
4332 bool halted_before_profiling = target->state == TARGET_HALTED;
4334 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336 uint32_t start_address = 0;
4337 uint32_t end_address = 0;
4338 bool with_range = false;
4339 if (CMD_ARGC == 4) {
4340 with_range = true;
4341 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4342 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4343 if (start_address > end_address || (end_address - start_address) < 2) {
4344 command_print(CMD, "Error: end - start < 2");
4345 return ERROR_COMMAND_ARGUMENT_INVALID;
4349 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4350 if (!samples) {
4351 LOG_ERROR("No memory to store samples.");
4352 return ERROR_FAIL;
4355 uint64_t timestart_ms = timeval_ms();
4357 * Some cores let us sample the PC without the
4358 * annoying halt/resume step; for example, ARMv7 PCSR.
4359 * Provide a way to use that more efficient mechanism.
4361 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4362 &num_of_samples, offset);
4363 if (retval != ERROR_OK) {
4364 free(samples);
4365 return retval;
4367 uint32_t duration_ms = timeval_ms() - timestart_ms;
4369 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4371 retval = target_poll(target);
4372 if (retval != ERROR_OK) {
4373 free(samples);
4374 return retval;
4377 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4378 /* The target was halted before we started and is running now. Halt it,
4379 * for consistency. */
4380 retval = target_halt(target);
4381 if (retval != ERROR_OK) {
4382 free(samples);
4383 return retval;
4385 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4386 /* The target was running before we started and is halted now. Resume
4387 * it, for consistency. */
4388 retval = target_resume(target, 1, 0, 0, 0);
4389 if (retval != ERROR_OK) {
4390 free(samples);
4391 return retval;
4395 retval = target_poll(target);
4396 if (retval != ERROR_OK) {
4397 free(samples);
4398 return retval;
4401 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4402 with_range, start_address, end_address, target, duration_ms);
4403 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4405 free(samples);
4406 return retval;
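/* Illustrative invocations, assuming the usual registration as 'profile'
 * (the first argument is forwarded to target_profiling(), conventionally
 * the number of seconds to sample):
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08020000
 * The optional address pair restricts the histogram range; the resulting
 * gmon.out can be analyzed with gprof alongside the ELF image. */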
4409 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4411 char *namebuf;
4412 Jim_Obj *obj_name, *obj_val;
4413 int result;
4415 namebuf = alloc_printf("%s(%d)", varname, idx);
4416 if (!namebuf)
4417 return JIM_ERR;
4419 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4420 jim_wide wide_val = val;
4421 obj_val = Jim_NewWideObj(interp, wide_val);
4422 if (!obj_name || !obj_val) {
4423 free(namebuf);
4424 return JIM_ERR;
4427 Jim_IncrRefCount(obj_name);
4428 Jim_IncrRefCount(obj_val);
4429 result = Jim_SetVariable(interp, obj_name, obj_val);
4430 Jim_DecrRefCount(interp, obj_name);
4431 Jim_DecrRefCount(interp, obj_val);
4432 free(namebuf);
4433 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4434 return result;
4437 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4439 int e;
4441 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4443 /* argv[0] = name of array to receive the data
4444 * argv[1] = desired element width in bits
4445 * argv[2] = memory address
4446 * argv[3] = count of times to read
4447 * argv[4] = optional "phys"
4449 if (argc < 4 || argc > 5) {
4450 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4451 return JIM_ERR;
4454 /* Arg 0: Name of the array variable */
4455 const char *varname = Jim_GetString(argv[0], NULL);
4457 /* Arg 1: Bit width of one element */
4458 long l;
4459 e = Jim_GetLong(interp, argv[1], &l);
4460 if (e != JIM_OK)
4461 return e;
4462 const unsigned int width_bits = l;
4464 if (width_bits != 8 &&
4465 width_bits != 16 &&
4466 width_bits != 32 &&
4467 width_bits != 64) {
4468 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4469 Jim_AppendStrings(interp, Jim_GetResult(interp),
4470 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4471 return JIM_ERR;
4473 const unsigned int width = width_bits / 8;
4475 /* Arg 2: Memory address */
4476 jim_wide wide_addr;
4477 e = Jim_GetWide(interp, argv[2], &wide_addr);
4478 if (e != JIM_OK)
4479 return e;
4480 target_addr_t addr = (target_addr_t)wide_addr;
4482 /* Arg 3: Number of elements to read */
4483 e = Jim_GetLong(interp, argv[3], &l);
4484 if (e != JIM_OK)
4485 return e;
4486 size_t len = l;
4488 /* Arg 4: phys */
4489 bool is_phys = false;
4490 if (argc > 4) {
4491 int str_len = 0;
4492 const char *phys = Jim_GetString(argv[4], &str_len);
4493 if (!strncmp(phys, "phys", str_len))
4494 is_phys = true;
4495 else
4496 return JIM_ERR;
4499 /* Argument checks */
4500 if (len == 0) {
4501 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4502 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4503 return JIM_ERR;
4505 if ((addr + (len * width)) < addr) {
4506 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4507 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4508 return JIM_ERR;
4510 if (len > 65536) {
4511 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4512 Jim_AppendStrings(interp, Jim_GetResult(interp),
4513 "mem2array: too large read request, exceeds 64K items", NULL);
4514 return JIM_ERR;
4517 if ((width == 1) ||
4518 ((width == 2) && ((addr & 1) == 0)) ||
4519 ((width == 4) && ((addr & 3) == 0)) ||
4520 ((width == 8) && ((addr & 7) == 0))) {
4521 /* alignment correct */
4522 } else {
4523 char buf[100];
4524 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4525 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4526 addr,
4527 width);
4528 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4529 return JIM_ERR;
4532 /* Transfer loop */
4534 /* index counter */
4535 size_t idx = 0;
4537 const size_t buffersize = 4096;
4538 uint8_t *buffer = malloc(buffersize);
4539 if (!buffer)
4540 return JIM_ERR;
4542 /* assume ok */
4543 e = JIM_OK;
4544 while (len) {
4545 /* Slurp... in buffer size chunks */
4546 const unsigned int max_chunk_len = buffersize / width;
4547 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4549 int retval;
4550 if (is_phys)
4551 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4552 else
4553 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4554 if (retval != ERROR_OK) {
4555 /* read failed */
4556 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4557 addr,
4558 width,
4559 chunk_len);
4560 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4561 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4562 e = JIM_ERR;
4563 break;
4564 } else {
4565 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4566 uint64_t v = 0;
4567 switch (width) {
4568 case 8:
4569 v = target_buffer_get_u64(target, &buffer[i*width]);
4570 break;
4571 case 4:
4572 v = target_buffer_get_u32(target, &buffer[i*width]);
4573 break;
4574 case 2:
4575 v = target_buffer_get_u16(target, &buffer[i*width]);
4576 break;
4577 case 1:
4578 v = buffer[i] & 0x0ff;
4579 break;
4581 new_u64_array_element(interp, varname, idx, v);
4583 len -= chunk_len;
4584 addr += chunk_len * width;
4588 free(buffer);
4590 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4592 return e;
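/* Deprecated Tcl-level example, kept only for completeness (prefer
 * 'read_memory'):
 *   mem2array samples 32 0x20000000 16
 * fills samples(0) .. samples(15) with 32-bit words read from 0x20000000;
 * appending 'phys' reads through the physical address space instead. */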
4595 COMMAND_HANDLER(handle_target_read_memory)
4598 * CMD_ARGV[0] = memory address
4599 * CMD_ARGV[1] = desired element width in bits
4600 * CMD_ARGV[2] = number of elements to read
4601 * CMD_ARGV[3] = optional "phys"
4604 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4605 return ERROR_COMMAND_SYNTAX_ERROR;
4607 /* Arg 1: Memory address. */
4608 target_addr_t addr;
4609 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4611 /* Arg 2: Bit width of one element. */
4612 unsigned int width_bits;
4613 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4615 /* Arg 3: Number of elements to read. */
4616 unsigned int count;
4617 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4619 /* Arg 4: Optional 'phys'. */
4620 bool is_phys = false;
4621 if (CMD_ARGC == 4) {
4622 if (strcmp(CMD_ARGV[3], "phys")) {
4623 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4624 return ERROR_COMMAND_ARGUMENT_INVALID;
4627 is_phys = true;
4630 switch (width_bits) {
4631 case 8:
4632 case 16:
4633 case 32:
4634 case 64:
4635 break;
4636 default:
4637 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4638 return ERROR_COMMAND_ARGUMENT_INVALID;
4641 const unsigned int width = width_bits / 8;
4643 if ((addr + (count * width)) < addr) {
4644 command_print(CMD, "read_memory: addr + count wraps to zero");
4645 return ERROR_COMMAND_ARGUMENT_INVALID;
4648 if (count > 65536) {
4649 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4650 return ERROR_COMMAND_ARGUMENT_INVALID;
4653 struct target *target = get_current_target(CMD_CTX);
4655 const size_t buffersize = 4096;
4656 uint8_t *buffer = malloc(buffersize);
4658 if (!buffer) {
4659 LOG_ERROR("Failed to allocate memory");
4660 return ERROR_FAIL;
4663 char *separator = "";
4664 while (count > 0) {
4665 const unsigned int max_chunk_len = buffersize / width;
4666 const size_t chunk_len = MIN(count, max_chunk_len);
4668 int retval;
4670 if (is_phys)
4671 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4672 else
4673 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4675 if (retval != ERROR_OK) {
4676 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4677 addr, width_bits, chunk_len);
4679 * FIXME: the error message gets appended to the list of values already read.
4680 * Add a way to flush and replace the old output, or demote this message to LOG_DEBUG().
4682 command_print(CMD, "read_memory: failed to read memory");
4683 free(buffer);
4684 return retval;
4687 for (size_t i = 0; i < chunk_len ; i++) {
4688 uint64_t v = 0;
4690 switch (width) {
4691 case 8:
4692 v = target_buffer_get_u64(target, &buffer[i * width]);
4693 break;
4694 case 4:
4695 v = target_buffer_get_u32(target, &buffer[i * width]);
4696 break;
4697 case 2:
4698 v = target_buffer_get_u16(target, &buffer[i * width]);
4699 break;
4700 case 1:
4701 v = buffer[i];
4702 break;
4705 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4706 separator = " ";
4709 count -= chunk_len;
4710 addr += chunk_len * width;
4713 free(buffer);
4715 return ERROR_OK;
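/* Illustrative 'read_memory' invocations matching the parsing above:
 *   read_memory 0x20000000 32 4
 * prints four 32-bit words as a space-separated list of hex values, and
 *   read_memory 0x20000000 8 16 phys
 * reads 16 bytes through the physical address space. */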
4718 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4720 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4721 if (!namebuf)
4722 return JIM_ERR;
4724 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4725 if (!obj_name) {
4726 free(namebuf);
4727 return JIM_ERR;
4730 Jim_IncrRefCount(obj_name);
4731 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4732 Jim_DecrRefCount(interp, obj_name);
4733 free(namebuf);
4734 if (!obj_val)
4735 return JIM_ERR;
4737 jim_wide wide_val;
4738 int result = Jim_GetWide(interp, obj_val, &wide_val);
4739 *val = wide_val;
4740 return result;
4743 static int target_array2mem(Jim_Interp *interp, struct target *target,
4744 int argc, Jim_Obj *const *argv)
4746 int e;
4748 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4750 /* argv[0] = name of array from which to read the data
4751 * argv[1] = desired element width in bits
4752 * argv[2] = memory address
4753 * argv[3] = number of elements to write
4754 * argv[4] = optional "phys"
4756 if (argc < 4 || argc > 5) {
4757 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4758 return JIM_ERR;
4761 /* Arg 0: Name of the array variable */
4762 const char *varname = Jim_GetString(argv[0], NULL);
4764 /* Arg 1: Bit width of one element */
4765 long l;
4766 e = Jim_GetLong(interp, argv[1], &l);
4767 if (e != JIM_OK)
4768 return e;
4769 const unsigned int width_bits = l;
4771 if (width_bits != 8 &&
4772 width_bits != 16 &&
4773 width_bits != 32 &&
4774 width_bits != 64) {
4775 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4776 Jim_AppendStrings(interp, Jim_GetResult(interp),
4777 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4778 return JIM_ERR;
4780 const unsigned int width = width_bits / 8;
4782 /* Arg 2: Memory address */
4783 jim_wide wide_addr;
4784 e = Jim_GetWide(interp, argv[2], &wide_addr);
4785 if (e != JIM_OK)
4786 return e;
4787 target_addr_t addr = (target_addr_t)wide_addr;
4789 /* Arg 3: Number of elements to write */
4790 e = Jim_GetLong(interp, argv[3], &l);
4791 if (e != JIM_OK)
4792 return e;
4793 size_t len = l;
4795 /* Arg 4: Phys */
4796 bool is_phys = false;
4797 if (argc > 4) {
4798 int str_len = 0;
4799 const char *phys = Jim_GetString(argv[4], &str_len);
4800 if (!strncmp(phys, "phys", str_len))
4801 is_phys = true;
4802 else
4803 return JIM_ERR;
4806 /* Argument checks */
4807 if (len == 0) {
4808 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4809 Jim_AppendStrings(interp, Jim_GetResult(interp),
4810 "array2mem: zero width read?", NULL);
4811 return JIM_ERR;
4814 if ((addr + (len * width)) < addr) {
4815 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4816 Jim_AppendStrings(interp, Jim_GetResult(interp),
4817 "array2mem: addr + len - wraps to zero?", NULL);
4818 return JIM_ERR;
4821 if (len > 65536) {
4822 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4823 Jim_AppendStrings(interp, Jim_GetResult(interp),
4824 "array2mem: too large memory write request, exceeds 64K items", NULL);
4825 return JIM_ERR;
4828 if ((width == 1) ||
4829 ((width == 2) && ((addr & 1) == 0)) ||
4830 ((width == 4) && ((addr & 3) == 0)) ||
4831 ((width == 8) && ((addr & 7) == 0))) {
4832 /* alignment correct */
4833 } else {
4834 char buf[100];
4835 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4836 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4837 addr,
4838 width);
4839 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4840 return JIM_ERR;
4843 /* Transfer loop */
4845 /* assume ok */
4846 e = JIM_OK;
4848 const size_t buffersize = 4096;
4849 uint8_t *buffer = malloc(buffersize);
4850 if (!buffer)
4851 return JIM_ERR;
4853 /* index counter */
4854 size_t idx = 0;
4856 while (len) {
4857 /* Slurp... in buffer size chunks */
4858 const unsigned int max_chunk_len = buffersize / width;
4860 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4862 /* Fill the buffer */
4863 for (size_t i = 0; i < chunk_len; i++, idx++) {
4864 uint64_t v = 0;
4865 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4866 free(buffer);
4867 return JIM_ERR;
4869 switch (width) {
4870 case 8:
4871 target_buffer_set_u64(target, &buffer[i * width], v);
4872 break;
4873 case 4:
4874 target_buffer_set_u32(target, &buffer[i * width], v);
4875 break;
4876 case 2:
4877 target_buffer_set_u16(target, &buffer[i * width], v);
4878 break;
4879 case 1:
4880 buffer[i] = v & 0x0ff;
4881 break;
4884 len -= chunk_len;
4886 /* Write the buffer to memory */
4887 int retval;
4888 if (is_phys)
4889 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4890 else
4891 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4892 if (retval != ERROR_OK) {
4893 /* write failed */
4894 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4895 addr,
4896 width,
4897 chunk_len);
4898 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4899 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4900 e = JIM_ERR;
4901 break;
4903 addr += chunk_len * width;
4906 free(buffer);
4908 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4910 return e;
4913 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4914 Jim_Obj * const *argv)
4917 * argv[1] = memory address
4918 * argv[2] = desired element width in bits
4919 * argv[3] = list of data to write
4920 * argv[4] = optional "phys"
4923 if (argc < 4 || argc > 5) {
4924 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4925 return JIM_ERR;
4928 /* Arg 1: Memory address. */
4929 int e;
4930 jim_wide wide_addr;
4931 e = Jim_GetWide(interp, argv[1], &wide_addr);
4933 if (e != JIM_OK)
4934 return e;
4936 target_addr_t addr = (target_addr_t)wide_addr;
4938 /* Arg 2: Bit width of one element. */
4939 long l;
4940 e = Jim_GetLong(interp, argv[2], &l);
4942 if (e != JIM_OK)
4943 return e;
4945 const unsigned int width_bits = l;
4946 size_t count = Jim_ListLength(interp, argv[3]);
4948 /* Arg 4: Optional 'phys'. */
4949 bool is_phys = false;
4951 if (argc > 4) {
4952 const char *phys = Jim_GetString(argv[4], NULL);
4954 if (strcmp(phys, "phys")) {
4955 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4956 return JIM_ERR;
4959 is_phys = true;
4962 switch (width_bits) {
4963 case 8:
4964 case 16:
4965 case 32:
4966 case 64:
4967 break;
4968 default:
4969 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4970 return JIM_ERR;
4973 const unsigned int width = width_bits / 8;
4975 if ((addr + (count * width)) < addr) {
4976 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4977 return JIM_ERR;
4980 if (count > 65536) {
4981 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4982 return JIM_ERR;
4985 struct command_context *cmd_ctx = current_command_context(interp);
4986 assert(cmd_ctx != NULL);
4987 struct target *target = get_current_target(cmd_ctx);
4989 const size_t buffersize = 4096;
4990 uint8_t *buffer = malloc(buffersize);
4992 if (!buffer) {
4993 LOG_ERROR("Failed to allocate memory");
4994 return JIM_ERR;
4997 size_t j = 0;
4999 while (count > 0) {
5000 const unsigned int max_chunk_len = buffersize / width;
5001 const size_t chunk_len = MIN(count, max_chunk_len);
5003 for (size_t i = 0; i < chunk_len; i++, j++) {
5004 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5005 jim_wide element_wide;
5006 Jim_GetWide(interp, tmp, &element_wide);
5008 const uint64_t v = element_wide;
5010 switch (width) {
5011 case 8:
5012 target_buffer_set_u64(target, &buffer[i * width], v);
5013 break;
5014 case 4:
5015 target_buffer_set_u32(target, &buffer[i * width], v);
5016 break;
5017 case 2:
5018 target_buffer_set_u16(target, &buffer[i * width], v);
5019 break;
5020 case 1:
5021 buffer[i] = v & 0x0ff;
5022 break;
5026 count -= chunk_len;
5028 int retval;
5030 if (is_phys)
5031 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5032 else
5033 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5035 if (retval != ERROR_OK) {
5036 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5037 addr, width_bits, chunk_len);
5038 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5039 e = JIM_ERR;
5040 break;
5043 addr += chunk_len * width;
5046 free(buffer);
5048 return e;
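/* Illustrative 'write_memory' invocation matching the parsing above:
 *   write_memory 0x20000000 32 {0xdeadbeef 0x00000000}
 * writes two 32-bit words starting at 0x20000000; append 'phys' to write
 * through the physical address space. */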
5051 /* FIX? should we propagate errors here rather than printing them
5052 * and continuing?
5054 void target_handle_event(struct target *target, enum target_event e)
5056 struct target_event_action *teap;
5057 int retval;
5059 for (teap = target->event_action; teap; teap = teap->next) {
5060 if (teap->event == e) {
5061 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5062 target->target_number,
5063 target_name(target),
5064 target_type_name(target),
5066 target_event_name(e),
5067 Jim_GetString(teap->body, NULL));
5069 /* Override the current target with the target the event
5070 * is issued from (many scripts rely on this).
5071 * Restore the previous override as soon as
5072 * the handler processing is done. */
5073 struct command_context *cmd_ctx = current_command_context(teap->interp);
5074 struct target *saved_target_override = cmd_ctx->current_target_override;
5075 cmd_ctx->current_target_override = target;
5077 retval = Jim_EvalObj(teap->interp, teap->body);
5079 cmd_ctx->current_target_override = saved_target_override;
5081 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5082 return;
5084 if (retval == JIM_RETURN)
5085 retval = teap->interp->returnCode;
5087 if (retval != JIM_OK) {
5088 Jim_MakeErrorMessage(teap->interp);
5089 LOG_USER("Error executing event %s on target %s:\n%s",
5090 target_event_name(e),
5091 target_name(target),
5092 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5093 /* clean both error code and stacktrace before return */
5094 Jim_Eval(teap->interp, "error \"\" \"\"");
5100 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5101 Jim_Obj * const *argv)
5103 bool force = false;
5105 if (argc == 3) {
5106 const char *option = Jim_GetString(argv[1], NULL);
5108 if (!strcmp(option, "-force")) {
5109 argc--;
5110 argv++;
5111 force = true;
5112 } else {
5113 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5114 return JIM_ERR;
5118 if (argc != 2) {
5119 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5120 return JIM_ERR;
5123 const int length = Jim_ListLength(interp, argv[1]);
5125 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5127 if (!result_dict)
5128 return JIM_ERR;
5130 struct command_context *cmd_ctx = current_command_context(interp);
5131 assert(cmd_ctx != NULL);
5132 const struct target *target = get_current_target(cmd_ctx);
5134 for (int i = 0; i < length; i++) {
5135 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5137 if (!elem)
5138 return JIM_ERR;
5140 const char *reg_name = Jim_String(elem);
5142 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5143 false);
5145 if (!reg || !reg->exist) {
5146 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5147 return JIM_ERR;
5150 if (force) {
5151 int retval = reg->type->get(reg);
5153 if (retval != ERROR_OK) {
5154 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5155 reg_name);
5156 return JIM_ERR;
5160 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5162 if (!reg_value) {
5163 LOG_ERROR("Failed to allocate memory");
5164 return JIM_ERR;
5167 char *tmp = alloc_printf("0x%s", reg_value);
5169 free(reg_value);
5171 if (!tmp) {
5172 LOG_ERROR("Failed to allocate memory");
5173 return JIM_ERR;
5176 Jim_DictAddElement(interp, result_dict, elem,
5177 Jim_NewStringObj(interp, tmp, -1));
5179 free(tmp);
5182 Jim_SetResult(interp, result_dict);
5184 return JIM_OK;
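/* Illustrative 'get_reg' use (register names are target-dependent examples):
 *   get_reg {pc sp}
 * returns a dict such as {pc 0x0800012a sp 0x20001000} built from the
 * cached values; 'get_reg -force {pc}' re-reads the register from the
 * target before reporting it. */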
5187 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5188 Jim_Obj * const *argv)
5190 if (argc != 2) {
5191 Jim_WrongNumArgs(interp, 1, argv, "dict");
5192 return JIM_ERR;
5195 int tmp;
5196 #if JIM_VERSION >= 80
5197 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5199 if (!dict)
5200 return JIM_ERR;
5201 #else
5202 Jim_Obj **dict;
5203 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5205 if (ret != JIM_OK)
5206 return ret;
5207 #endif
5209 const unsigned int length = tmp;
5210 struct command_context *cmd_ctx = current_command_context(interp);
5211 assert(cmd_ctx);
5212 const struct target *target = get_current_target(cmd_ctx);
5214 for (unsigned int i = 0; i < length; i += 2) {
5215 const char *reg_name = Jim_String(dict[i]);
5216 const char *reg_value = Jim_String(dict[i + 1]);
5217 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5218 false);
5220 if (!reg || !reg->exist) {
5221 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5222 return JIM_ERR;
5225 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5227 if (!buf) {
5228 LOG_ERROR("Failed to allocate memory");
5229 return JIM_ERR;
5232 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5233 int retval = reg->type->set(reg, buf);
5234 free(buf);
5236 if (retval != ERROR_OK) {
5237 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5238 reg_value, reg_name);
5239 return JIM_ERR;
5243 return JIM_OK;
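/* Illustrative 'set_reg' use (register names are target-dependent examples):
 *   set_reg {pc 0x08000000 sp 0x20002000}
 * walks the dict pairs above and writes each named register; an unknown or
 * non-existent register name fails the whole command. */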
5247 * Returns true only if the target has a handler for the specified event.
5249 bool target_has_event_action(struct target *target, enum target_event event)
5251 struct target_event_action *teap;
5253 for (teap = target->event_action; teap; teap = teap->next) {
5254 if (teap->event == event)
5255 return true;
5257 return false;
5260 enum target_cfg_param {
5261 TCFG_TYPE,
5262 TCFG_EVENT,
5263 TCFG_WORK_AREA_VIRT,
5264 TCFG_WORK_AREA_PHYS,
5265 TCFG_WORK_AREA_SIZE,
5266 TCFG_WORK_AREA_BACKUP,
5267 TCFG_ENDIAN,
5268 TCFG_COREID,
5269 TCFG_CHAIN_POSITION,
5270 TCFG_DBGBASE,
5271 TCFG_RTOS,
5272 TCFG_DEFER_EXAMINE,
5273 TCFG_GDB_PORT,
5274 TCFG_GDB_MAX_CONNECTIONS,
5277 static struct jim_nvp nvp_config_opts[] = {
5278 { .name = "-type", .value = TCFG_TYPE },
5279 { .name = "-event", .value = TCFG_EVENT },
5280 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5281 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5282 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5283 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5284 { .name = "-endian", .value = TCFG_ENDIAN },
5285 { .name = "-coreid", .value = TCFG_COREID },
5286 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5287 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5288 { .name = "-rtos", .value = TCFG_RTOS },
5289 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5290 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5291 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5292 { .name = NULL, .value = -1 }
5295 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5297 struct jim_nvp *n;
5298 Jim_Obj *o;
5299 jim_wide w;
5300 int e;
5302 /* parse config or cget options ... */
5303 while (goi->argc > 0) {
5304 Jim_SetEmptyResult(goi->interp);
5305 /* jim_getopt_debug(goi); */
5307 if (target->type->target_jim_configure) {
5308 /* target defines a configure function */
5309 /* target gets first dibs on parameters */
5310 e = (*(target->type->target_jim_configure))(target, goi);
5311 if (e == JIM_OK) {
5312 /* more? */
5313 continue;
5315 if (e == JIM_ERR) {
5316 /* An error */
5317 return e;
5319 /* otherwise we 'continue' below */
5321 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5322 if (e != JIM_OK) {
5323 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5324 return e;
5326 switch (n->value) {
5327 case TCFG_TYPE:
5328 /* not settable */
5329 if (goi->isconfigure) {
5330 Jim_SetResultFormatted(goi->interp,
5331 "not settable: %s", n->name);
5332 return JIM_ERR;
5333 } else {
5334 no_params:
5335 if (goi->argc != 0) {
5336 Jim_WrongNumArgs(goi->interp,
5337 goi->argc, goi->argv,
5338 "NO PARAMS");
5339 return JIM_ERR;
5342 Jim_SetResultString(goi->interp,
5343 target_type_name(target), -1);
5344 /* loop for more */
5345 break;
5346 case TCFG_EVENT:
5347 if (goi->argc == 0) {
5348 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5349 return JIM_ERR;
5352 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5353 if (e != JIM_OK) {
5354 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5355 return e;
5358 if (goi->isconfigure) {
5359 if (goi->argc != 1) {
5360 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5361 return JIM_ERR;
5363 } else {
5364 if (goi->argc != 0) {
5365 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5366 return JIM_ERR;
5371 struct target_event_action *teap;
5373 teap = target->event_action;
5374 /* replace existing? */
5375 while (teap) {
5376 if (teap->event == (enum target_event)n->value)
5377 break;
5378 teap = teap->next;
5381 if (goi->isconfigure) {
5382 /* START_DEPRECATED_TPIU */
5383 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5384 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5385 /* END_DEPRECATED_TPIU */
5387 bool replace = true;
5388 if (!teap) {
5389 /* create new */
5390 teap = calloc(1, sizeof(*teap));
5391 replace = false;
5393 teap->event = n->value;
5394 teap->interp = goi->interp;
5395 jim_getopt_obj(goi, &o);
5396 if (teap->body)
5397 Jim_DecrRefCount(teap->interp, teap->body);
5398 teap->body = Jim_DuplicateObj(goi->interp, o);
5400 * FIXME:
5401 * Tcl/Tk "bind" events have a nice feature:
5402 * substitutions such as %X and %Y can be used in the event body.
5403 * We should support something similar here, for example:
5404 * %T - target name,
5405 * %N - target number,
5406 * %E - event name.
5407 * See the Tk "bind" command.
5409 Jim_IncrRefCount(teap->body);
5411 if (!replace) {
5412 /* add to head of event list */
5413 teap->next = target->event_action;
5414 target->event_action = teap;
5416 Jim_SetEmptyResult(goi->interp);
5417 } else {
5418 /* get */
5419 if (!teap)
5420 Jim_SetEmptyResult(goi->interp);
5421 else
5422 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5425 /* loop for more */
5426 break;
5428 case TCFG_WORK_AREA_VIRT:
5429 if (goi->isconfigure) {
5430 target_free_all_working_areas(target);
5431 e = jim_getopt_wide(goi, &w);
5432 if (e != JIM_OK)
5433 return e;
5434 target->working_area_virt = w;
5435 target->working_area_virt_spec = true;
5436 } else {
5437 if (goi->argc != 0)
5438 goto no_params;
5440 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5441 /* loop for more */
5442 break;
5444 case TCFG_WORK_AREA_PHYS:
5445 if (goi->isconfigure) {
5446 target_free_all_working_areas(target);
5447 e = jim_getopt_wide(goi, &w);
5448 if (e != JIM_OK)
5449 return e;
5450 target->working_area_phys = w;
5451 target->working_area_phys_spec = true;
5452 } else {
5453 if (goi->argc != 0)
5454 goto no_params;
5456 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5457 /* loop for more */
5458 break;
5460 case TCFG_WORK_AREA_SIZE:
5461 if (goi->isconfigure) {
5462 target_free_all_working_areas(target);
5463 e = jim_getopt_wide(goi, &w);
5464 if (e != JIM_OK)
5465 return e;
5466 target->working_area_size = w;
5467 } else {
5468 if (goi->argc != 0)
5469 goto no_params;
5471 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5472 /* loop for more */
5473 break;
5475 case TCFG_WORK_AREA_BACKUP:
5476 if (goi->isconfigure) {
5477 target_free_all_working_areas(target);
5478 e = jim_getopt_wide(goi, &w);
5479 if (e != JIM_OK)
5480 return e;
5481 /* make this exactly 1 or 0 */
5482 target->backup_working_area = (!!w);
5483 } else {
5484 if (goi->argc != 0)
5485 goto no_params;
5487 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5488 /* loop for more */
5489 break;
5492 case TCFG_ENDIAN:
5493 if (goi->isconfigure) {
5494 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5495 if (e != JIM_OK) {
5496 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5497 return e;
5499 target->endianness = n->value;
5500 } else {
5501 if (goi->argc != 0)
5502 goto no_params;
5504 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5505 if (!n->name) {
5506 target->endianness = TARGET_LITTLE_ENDIAN;
5507 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5509 Jim_SetResultString(goi->interp, n->name, -1);
5510 /* loop for more */
5511 break;
5513 case TCFG_COREID:
5514 if (goi->isconfigure) {
5515 e = jim_getopt_wide(goi, &w);
5516 if (e != JIM_OK)
5517 return e;
5518 target->coreid = (int32_t)w;
5519 } else {
5520 if (goi->argc != 0)
5521 goto no_params;
5523 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5524 /* loop for more */
5525 break;
5527 case TCFG_CHAIN_POSITION:
5528 if (goi->isconfigure) {
5529 Jim_Obj *o_t;
5530 struct jtag_tap *tap;
5532 if (target->has_dap) {
5533 Jim_SetResultString(goi->interp,
5534 "target requires -dap parameter instead of -chain-position!", -1);
5535 return JIM_ERR;
5538 target_free_all_working_areas(target);
5539 e = jim_getopt_obj(goi, &o_t);
5540 if (e != JIM_OK)
5541 return e;
5542 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5543 if (!tap)
5544 return JIM_ERR;
5545 target->tap = tap;
5546 target->tap_configured = true;
5547 } else {
5548 if (goi->argc != 0)
5549 goto no_params;
5551 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5552 /* loop for more */
5553 break;
5554 case TCFG_DBGBASE:
5555 if (goi->isconfigure) {
5556 e = jim_getopt_wide(goi, &w);
5557 if (e != JIM_OK)
5558 return e;
5559 target->dbgbase = (uint32_t)w;
5560 target->dbgbase_set = true;
5561 } else {
5562 if (goi->argc != 0)
5563 goto no_params;
5565 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5566 /* loop for more */
5567 break;
5568 case TCFG_RTOS:
5569 /* RTOS */
5571 int result = rtos_create(goi, target);
5572 if (result != JIM_OK)
5573 return result;
5575 /* loop for more */
5576 break;
5578 case TCFG_DEFER_EXAMINE:
5579 /* DEFER_EXAMINE */
5580 target->defer_examine = true;
5581 /* loop for more */
5582 break;
5584 case TCFG_GDB_PORT:
5585 if (goi->isconfigure) {
5586 struct command_context *cmd_ctx = current_command_context(goi->interp);
5587 if (cmd_ctx->mode != COMMAND_CONFIG) {
5588 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5589 return JIM_ERR;
5592 const char *s;
5593 e = jim_getopt_string(goi, &s, NULL);
5594 if (e != JIM_OK)
5595 return e;
5596 free(target->gdb_port_override);
5597 target->gdb_port_override = strdup(s);
5598 } else {
5599 if (goi->argc != 0)
5600 goto no_params;
5602 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5603 /* loop for more */
5604 break;
5606 case TCFG_GDB_MAX_CONNECTIONS:
5607 if (goi->isconfigure) {
5608 struct command_context *cmd_ctx = current_command_context(goi->interp);
5609 if (cmd_ctx->mode != COMMAND_CONFIG) {
5610 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5611 return JIM_ERR;
5614 e = jim_getopt_wide(goi, &w);
5615 if (e != JIM_OK)
5616 return e;
5617 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5618 } else {
5619 if (goi->argc != 0)
5620 goto no_params;
5622 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5623 break;
5625 } /* while (goi->argc) */
5628 /* done - we return */
5629 return JIM_OK;
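/* Illustrative use of the option table above ($_TARGETNAME stands for the
 * target's instance command; event name and body are examples only):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init done" }
 *   $_TARGETNAME cget -endian
 * 'configure' consumes option/value pairs, while 'cget' with a bare option
 * name returns the currently configured value. */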
5632 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5634 struct command *c = jim_to_command(interp);
5635 struct jim_getopt_info goi;
5637 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5638 goi.isconfigure = !strcmp(c->name, "configure");
5639 if (goi.argc < 1) {
5640 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5641 "missing: -option ...");
5642 return JIM_ERR;
5644 struct command_context *cmd_ctx = current_command_context(interp);
5645 assert(cmd_ctx);
5646 struct target *target = get_current_target(cmd_ctx);
5647 return target_configure(&goi, target);
5650 static int jim_target_mem2array(Jim_Interp *interp,
5651 int argc, Jim_Obj *const *argv)
5653 struct command_context *cmd_ctx = current_command_context(interp);
5654 assert(cmd_ctx);
5655 struct target *target = get_current_target(cmd_ctx);
5656 return target_mem2array(interp, target, argc - 1, argv + 1);
5659 static int jim_target_array2mem(Jim_Interp *interp,
5660 int argc, Jim_Obj *const *argv)
5662 struct command_context *cmd_ctx = current_command_context(interp);
5663 assert(cmd_ctx);
5664 struct target *target = get_current_target(cmd_ctx);
5665 return target_array2mem(interp, target, argc - 1, argv + 1);
5668 static int jim_target_tap_disabled(Jim_Interp *interp)
5670 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5671 return JIM_ERR;
5674 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5676 bool allow_defer = false;
5678 struct jim_getopt_info goi;
5679 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5680 if (goi.argc > 1) {
5681 const char *cmd_name = Jim_GetString(argv[0], NULL);
5682 Jim_SetResultFormatted(goi.interp,
5683 "usage: %s ['allow-defer']", cmd_name);
5684 return JIM_ERR;
5686 if (goi.argc > 0 &&
5687 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5688 /* consume it */
5689 Jim_Obj *obj;
5690 int e = jim_getopt_obj(&goi, &obj);
5691 if (e != JIM_OK)
5692 return e;
5693 allow_defer = true;
5696 struct command_context *cmd_ctx = current_command_context(interp);
5697 assert(cmd_ctx);
5698 struct target *target = get_current_target(cmd_ctx);
5699 if (!target->tap->enabled)
5700 return jim_target_tap_disabled(interp);
5702 if (allow_defer && target->defer_examine) {
5703 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5704 LOG_INFO("Use arp_examine command to examine it manually!");
5705 return JIM_OK;
5708 int e = target->type->examine(target);
5709 if (e != ERROR_OK) {
5710 target_reset_examined(target);
5711 return JIM_ERR;
5714 target_set_examined(target);
5716 return JIM_OK;
5719 COMMAND_HANDLER(handle_target_was_examined)
5721 if (CMD_ARGC != 0)
5722 return ERROR_COMMAND_SYNTAX_ERROR;
5724 struct target *target = get_current_target(CMD_CTX);
5726 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5728 return ERROR_OK;
5731 COMMAND_HANDLER(handle_target_examine_deferred)
5733 if (CMD_ARGC != 0)
5734 return ERROR_COMMAND_SYNTAX_ERROR;
5736 struct target *target = get_current_target(CMD_CTX);
5738 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5740 return ERROR_OK;
5743 COMMAND_HANDLER(handle_target_halt_gdb)
5745 if (CMD_ARGC != 0)
5746 return ERROR_COMMAND_SYNTAX_ERROR;
5748 struct target *target = get_current_target(CMD_CTX);
5750 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5753 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5755 if (argc != 1) {
5756 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5757 return JIM_ERR;
5759 struct command_context *cmd_ctx = current_command_context(interp);
5760 assert(cmd_ctx);
5761 struct target *target = get_current_target(cmd_ctx);
5762 if (!target->tap->enabled)
5763 return jim_target_tap_disabled(interp);
5765 int e;
5766 if (!(target_was_examined(target)))
5767 e = ERROR_TARGET_NOT_EXAMINED;
5768 else
5769 e = target->type->poll(target);
5770 if (e != ERROR_OK)
5771 return JIM_ERR;
5772 return JIM_OK;
5775 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5777 struct jim_getopt_info goi;
5778 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5780 if (goi.argc != 2) {
5781 Jim_WrongNumArgs(interp, 0, argv,
5782 "([tT]|[fF]|assert|deassert) BOOL");
5783 return JIM_ERR;
5786 struct jim_nvp *n;
5787 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5788 if (e != JIM_OK) {
5789 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5790 return e;
5792 /* the halt or not param */
5793 jim_wide a;
5794 e = jim_getopt_wide(&goi, &a);
5795 if (e != JIM_OK)
5796 return e;
5798 struct command_context *cmd_ctx = current_command_context(interp);
5799 assert(cmd_ctx);
5800 struct target *target = get_current_target(cmd_ctx);
5801 if (!target->tap->enabled)
5802 return jim_target_tap_disabled(interp);
5804 if (!target->type->assert_reset || !target->type->deassert_reset) {
5805 Jim_SetResultFormatted(interp,
5806 "No target-specific reset for %s",
5807 target_name(target));
5808 return JIM_ERR;
5811 if (target->defer_examine)
5812 target_reset_examined(target);
5814 /* determine if we should halt or not. */
5815 target->reset_halt = (a != 0);
5816 /* When this happens, all working areas are invalid. */
5817 target_free_all_working_areas_restore(target, 0);
5819 /* do the assert */
5820 if (n->value == NVP_ASSERT)
5821 e = target->type->assert_reset(target);
5822 else
5823 e = target->type->deassert_reset(target);
5824 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5827 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5829 if (argc != 1) {
5830 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5831 return JIM_ERR;
5833 struct command_context *cmd_ctx = current_command_context(interp);
5834 assert(cmd_ctx);
5835 struct target *target = get_current_target(cmd_ctx);
5836 if (!target->tap->enabled)
5837 return jim_target_tap_disabled(interp);
5838 int e = target->type->halt(target);
5839 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5842 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5844 struct jim_getopt_info goi;
5845 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5847 /* params: <name> statename timeoutmsecs */
5848 if (goi.argc != 2) {
5849 const char *cmd_name = Jim_GetString(argv[0], NULL);
5850 Jim_SetResultFormatted(goi.interp,
5851 "%s <state_name> <timeout_in_msec>", cmd_name);
5852 return JIM_ERR;
5855 struct jim_nvp *n;
5856 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5857 if (e != JIM_OK) {
5858 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5859 return e;
5861 jim_wide a;
5862 e = jim_getopt_wide(&goi, &a);
5863 if (e != JIM_OK)
5864 return e;
5865 struct command_context *cmd_ctx = current_command_context(interp);
5866 assert(cmd_ctx);
5867 struct target *target = get_current_target(cmd_ctx);
5868 if (!target->tap->enabled)
5869 return jim_target_tap_disabled(interp);
5871 e = target_wait_state(target, n->value, a);
5872 if (e != ERROR_OK) {
5873 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5874 Jim_SetResultFormatted(goi.interp,
5875 "target: %s wait %s fails (%#s) %s",
5876 target_name(target), n->name,
5877 obj, target_strerror_safe(e));
5878 return JIM_ERR;
5880 return JIM_OK;
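/* Illustrative use, e.g. '$_TARGETNAME arp_waitstate halted 1000' blocks
 * until the target reports the requested state or the 1000 ms timeout
 * expires; valid state names come from nvp_target_state. */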
5882 /* List, for humans, the events defined for this target.
5883 * Scripts/programs should use 'name cget -event NAME' instead.
5885 COMMAND_HANDLER(handle_target_event_list)
5887 struct target *target = get_current_target(CMD_CTX);
5888 struct target_event_action *teap = target->event_action;
5890 command_print(CMD, "Event actions for target (%d) %s\n",
5891 target->target_number,
5892 target_name(target));
5893 command_print(CMD, "%-25s | Body", "Event");
5894 command_print(CMD, "------------------------- | "
5895 "----------------------------------------");
5896 while (teap) {
5897 command_print(CMD, "%-25s | %s",
5898 target_event_name(teap->event),
5899 Jim_GetString(teap->body, NULL));
5900 teap = teap->next;
5902 command_print(CMD, "***END***");
5903 return ERROR_OK;
5906 COMMAND_HANDLER(handle_target_current_state)
5908 if (CMD_ARGC != 0)
5909 return ERROR_COMMAND_SYNTAX_ERROR;
5911 struct target *target = get_current_target(CMD_CTX);
5913 command_print(CMD, "%s", target_state_name(target));
5915 return ERROR_OK;
5918 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5920 struct jim_getopt_info goi;
5921 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5922 if (goi.argc != 1) {
5923 const char *cmd_name = Jim_GetString(argv[0], NULL);
5924 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5925 return JIM_ERR;
5927 struct jim_nvp *n;
5928 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5929 if (e != JIM_OK) {
5930 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5931 return e;
5933 struct command_context *cmd_ctx = current_command_context(interp);
5934 assert(cmd_ctx);
5935 struct target *target = get_current_target(cmd_ctx);
5936 target_handle_event(target, n->value);
5937 return JIM_OK;
5940 static const struct command_registration target_instance_command_handlers[] = {
5942 .name = "configure",
5943 .mode = COMMAND_ANY,
5944 .jim_handler = jim_target_configure,
5945 .help = "configure a new target for use",
5946 .usage = "[target_attribute ...]",
5949 .name = "cget",
5950 .mode = COMMAND_ANY,
5951 .jim_handler = jim_target_configure,
5952 .help = "returns the specified target attribute",
5953 .usage = "target_attribute",
5956 .name = "mwd",
5957 .handler = handle_mw_command,
5958 .mode = COMMAND_EXEC,
5959 .help = "Write 64-bit word(s) to target memory",
5960 .usage = "address data [count]",
5963 .name = "mww",
5964 .handler = handle_mw_command,
5965 .mode = COMMAND_EXEC,
5966 .help = "Write 32-bit word(s) to target memory",
5967 .usage = "address data [count]",
5970 .name = "mwh",
5971 .handler = handle_mw_command,
5972 .mode = COMMAND_EXEC,
5973 .help = "Write 16-bit half-word(s) to target memory",
5974 .usage = "address data [count]",
5977 .name = "mwb",
5978 .handler = handle_mw_command,
5979 .mode = COMMAND_EXEC,
5980 .help = "Write byte(s) to target memory",
5981 .usage = "address data [count]",
5984 .name = "mdd",
5985 .handler = handle_md_command,
5986 .mode = COMMAND_EXEC,
5987 .help = "Display target memory as 64-bit words",
5988 .usage = "address [count]",
5991 .name = "mdw",
5992 .handler = handle_md_command,
5993 .mode = COMMAND_EXEC,
5994 .help = "Display target memory as 32-bit words",
5995 .usage = "address [count]",
5998 .name = "mdh",
5999 .handler = handle_md_command,
6000 .mode = COMMAND_EXEC,
6001 .help = "Display target memory as 16-bit half-words",
6002 .usage = "address [count]",
6005 .name = "mdb",
6006 .handler = handle_md_command,
6007 .mode = COMMAND_EXEC,
6008 .help = "Display target memory as 8-bit bytes",
6009 .usage = "address [count]",
6012 .name = "array2mem",
6013 .mode = COMMAND_EXEC,
6014 .jim_handler = jim_target_array2mem,
6015 .help = "Writes Tcl array of 8/16/32 bit numbers "
6016 "to target memory",
6017 .usage = "arrayname bitwidth address count",
6020 .name = "mem2array",
6021 .mode = COMMAND_EXEC,
6022 .jim_handler = jim_target_mem2array,
6023 .help = "Loads Tcl array of 8/16/32 bit numbers "
6024 "from target memory",
6025 .usage = "arrayname bitwidth address count",
6028 .name = "get_reg",
6029 .mode = COMMAND_EXEC,
6030 .jim_handler = target_jim_get_reg,
6031 .help = "Get register values from the target",
6032 .usage = "list",
6035 .name = "set_reg",
6036 .mode = COMMAND_EXEC,
6037 .jim_handler = target_jim_set_reg,
6038 .help = "Set target register values",
6039 .usage = "dict",
6042 .name = "read_memory",
6043 .mode = COMMAND_EXEC,
6044 .handler = handle_target_read_memory,
6045 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6046 .usage = "address width count ['phys']",
6049 .name = "write_memory",
6050 .mode = COMMAND_EXEC,
6051 .jim_handler = target_jim_write_memory,
6052 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6053 .usage = "address width data ['phys']",
6056 .name = "eventlist",
6057 .handler = handle_target_event_list,
6058 .mode = COMMAND_EXEC,
6059 .help = "displays a table of events defined for this target",
6060 .usage = "",
6063 .name = "curstate",
6064 .mode = COMMAND_EXEC,
6065 .handler = handle_target_current_state,
6066 .help = "displays the current state of this target",
6067 .usage = "",
6070 .name = "arp_examine",
6071 .mode = COMMAND_EXEC,
6072 .jim_handler = jim_target_examine,
6073 .help = "used internally for reset processing",
6074 .usage = "['allow-defer']",
6077 .name = "was_examined",
6078 .mode = COMMAND_EXEC,
6079 .handler = handle_target_was_examined,
6080 .help = "used internally for reset processing",
6081 .usage = "",
6084 .name = "examine_deferred",
6085 .mode = COMMAND_EXEC,
6086 .handler = handle_target_examine_deferred,
6087 .help = "used internally for reset processing",
6088 .usage = "",
6091 .name = "arp_halt_gdb",
6092 .mode = COMMAND_EXEC,
6093 .handler = handle_target_halt_gdb,
6094 .help = "used internally for reset processing to halt GDB",
6095 .usage = "",
6098 .name = "arp_poll",
6099 .mode = COMMAND_EXEC,
6100 .jim_handler = jim_target_poll,
6101 .help = "used internally for reset processing",
6104 .name = "arp_reset",
6105 .mode = COMMAND_EXEC,
6106 .jim_handler = jim_target_reset,
6107 .help = "used internally for reset processing",
6110 .name = "arp_halt",
6111 .mode = COMMAND_EXEC,
6112 .jim_handler = jim_target_halt,
6113 .help = "used internally for reset processing",
6116 .name = "arp_waitstate",
6117 .mode = COMMAND_EXEC,
6118 .jim_handler = jim_target_wait_state,
6119 .help = "used internally for reset processing",
6122 .name = "invoke-event",
6123 .mode = COMMAND_EXEC,
6124 .jim_handler = jim_target_invoke_event,
6125 .help = "invoke handler for specified event",
6126 .usage = "event_name",
6128 COMMAND_REGISTRATION_DONE
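/*
 * Implements "target create <name> <type> [options ...]": checks that the
 * command name is free and the target type is known, allocates and
 * initializes the target, applies the remaining arguments as "configure"
 * options, then registers a <name> command group built from
 * target_instance_command_handlers plus any type-specific commands.
 * Illustrative invocation (target and TAP names are examples only):
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 */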
6131 static int target_create(struct jim_getopt_info *goi)
6133 Jim_Obj *new_cmd;
6134 Jim_Cmd *cmd;
6135 const char *cp;
6136 int e;
6137 int x;
6138 struct target *target;
6139 struct command_context *cmd_ctx;
6141 cmd_ctx = current_command_context(goi->interp);
6142 assert(cmd_ctx);
6144 if (goi->argc < 3) {
6145 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6146 return JIM_ERR;
6149 /* COMMAND */
6150 jim_getopt_obj(goi, &new_cmd);
6151 /* does this command exist? */
6152 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6153 if (cmd) {
6154 cp = Jim_GetString(new_cmd, NULL);
6155 Jim_SetResultFormatted(goi->interp, "command/target '%s' already exists", cp);
6156 return JIM_ERR;
6159 /* TYPE */
6160 e = jim_getopt_string(goi, &cp, NULL);
6161 if (e != JIM_OK)
6162 return e;
6163 struct transport *tr = get_current_transport();
6164 if (tr->override_target) {
6165 e = tr->override_target(&cp);
6166 if (e != ERROR_OK) {
6167 LOG_ERROR("The selected transport doesn't support this target");
6168 return JIM_ERR;
6170 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6172 /* now does target type exist */
6173 for (x = 0 ; target_types[x] ; x++) {
6174 if (strcmp(cp, target_types[x]->name) == 0) {
6175 /* found */
6176 break;
6179 if (!target_types[x]) {
6180 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6181 for (x = 0 ; target_types[x] ; x++) {
6182 if (target_types[x + 1]) {
6183 Jim_AppendStrings(goi->interp,
6184 Jim_GetResult(goi->interp),
6185 target_types[x]->name,
6186 ", ", NULL);
6187 } else {
6188 Jim_AppendStrings(goi->interp,
6189 Jim_GetResult(goi->interp),
6190 " or ",
6191 target_types[x]->name, NULL);
6194 return JIM_ERR;
6197 /* Create it */
6198 target = calloc(1, sizeof(struct target));
6199 if (!target) {
6200 LOG_ERROR("Out of memory");
6201 return JIM_ERR;
6204 /* set empty smp cluster */
6205 target->smp_targets = &empty_smp_targets;
6207 /* set target number */
6208 target->target_number = new_target_number();
6210 /* allocate memory for each unique target type */
6211 target->type = malloc(sizeof(struct target_type));
6212 if (!target->type) {
6213 LOG_ERROR("Out of memory");
6214 free(target);
6215 return JIM_ERR;
6218 memcpy(target->type, target_types[x], sizeof(struct target_type));
6220 /* default to first core, override with -coreid */
6221 target->coreid = 0;
6223 target->working_area = 0x0;
6224 target->working_area_size = 0x0;
6225 target->working_areas = NULL;
6226 target->backup_working_area = 0;
6228 target->state = TARGET_UNKNOWN;
6229 target->debug_reason = DBG_REASON_UNDEFINED;
6230 target->reg_cache = NULL;
6231 target->breakpoints = NULL;
6232 target->watchpoints = NULL;
6233 target->next = NULL;
6234 target->arch_info = NULL;
6236 target->verbose_halt_msg = true;
6238 target->halt_issued = false;
6240 /* initialize trace information */
6241 target->trace_info = calloc(1, sizeof(struct trace));
6242 if (!target->trace_info) {
6243 LOG_ERROR("Out of memory");
6244 free(target->type);
6245 free(target);
6246 return JIM_ERR;
6249 target->dbgmsg = NULL;
6250 target->dbg_msg_enabled = 0;
6252 target->endianness = TARGET_ENDIAN_UNKNOWN;
6254 target->rtos = NULL;
6255 target->rtos_auto_detect = false;
6257 target->gdb_port_override = NULL;
6258 target->gdb_max_connections = 1;
6260 /* Do the rest as "configure" options */
6261 goi->isconfigure = 1;
6262 e = target_configure(goi, target);
6264 if (e == JIM_OK) {
6265 if (target->has_dap) {
6266 if (!target->dap_configured) {
6267 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6268 e = JIM_ERR;
6270 } else {
6271 if (!target->tap_configured) {
6272 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6273 e = JIM_ERR;
6276 /* tap must be set after target was configured */
6277 if (!target->tap)
6278 e = JIM_ERR;
6281 if (e != JIM_OK) {
6282 rtos_destroy(target);
6283 free(target->gdb_port_override);
6284 free(target->trace_info);
6285 free(target->type);
6286 free(target);
6287 return e;
6290 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6291 /* default endian to little if not specified */
6292 target->endianness = TARGET_LITTLE_ENDIAN;
6295 cp = Jim_GetString(new_cmd, NULL);
6296 target->cmd_name = strdup(cp);
6297 if (!target->cmd_name) {
6298 LOG_ERROR("Out of memory");
6299 rtos_destroy(target);
6300 free(target->gdb_port_override);
6301 free(target->trace_info);
6302 free(target->type);
6303 free(target);
6304 return JIM_ERR;
6307 if (target->type->target_create) {
6308 e = (*(target->type->target_create))(target, goi->interp);
6309 if (e != ERROR_OK) {
6310 LOG_DEBUG("target_create failed");
6311 free(target->cmd_name);
6312 rtos_destroy(target);
6313 free(target->gdb_port_override);
6314 free(target->trace_info);
6315 free(target->type);
6316 free(target);
6317 return JIM_ERR;
6321 /* create the target specific commands */
6322 if (target->type->commands) {
6323 e = register_commands(cmd_ctx, NULL, target->type->commands);
6324 if (e != ERROR_OK)
6325 LOG_ERROR("unable to register '%s' commands", cp);
6328 /* now - create the new target name command */
6329 const struct command_registration target_subcommands[] = {
6331 .chain = target_instance_command_handlers,
6334 .chain = target->type->commands,
6336 COMMAND_REGISTRATION_DONE
6338 const struct command_registration target_commands[] = {
6340 .name = cp,
6341 .mode = COMMAND_ANY,
6342 .help = "target command group",
6343 .usage = "",
6344 .chain = target_subcommands,
6346 COMMAND_REGISTRATION_DONE
6348 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6349 if (e != ERROR_OK) {
6350 if (target->type->deinit_target)
6351 target->type->deinit_target(target);
6352 free(target->cmd_name);
6353 rtos_destroy(target);
6354 free(target->gdb_port_override);
6355 free(target->trace_info);
6356 free(target->type);
6357 free(target);
6358 return JIM_ERR;
6361 /* append to end of list */
6362 append_to_list_all_targets(target);
6364 cmd_ctx->current_target = target;
6365 return JIM_OK;
6368 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6370 if (argc != 1) {
6371 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6372 return JIM_ERR;
6374 struct command_context *cmd_ctx = current_command_context(interp);
6375 assert(cmd_ctx);
6377 struct target *target = get_current_target_or_null(cmd_ctx);
6378 if (target)
6379 Jim_SetResultString(interp, target_name(target), -1);
6380 return JIM_OK;
6383 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6385 if (argc != 1) {
6386 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6387 return JIM_ERR;
6389 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6390 for (unsigned x = 0; target_types[x]; x++) {
6391 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6392 Jim_NewStringObj(interp, target_types[x]->name, -1));
6394 return JIM_OK;
6397 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6399 if (argc != 1) {
6400 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6401 return JIM_ERR;
6403 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6404 struct target *target = all_targets;
6405 while (target) {
6406 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6407 Jim_NewStringObj(interp, target_name(target), -1));
6408 target = target->next;
6410 return JIM_OK;
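/*
 * Helper for the "target smp" handler below: looks up the target named by
 * the Jim object and wraps it in a freshly allocated struct target_list
 * node. Returns NULL if the target does not exist or allocation fails;
 * the caller owns the returned node.
 */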
6413 static struct target_list *
6414 __attribute__((warn_unused_result))
6415 create_target_list_node(Jim_Obj *const name) {
6416 int len;
6417 const char *targetname = Jim_GetString(name, &len);
6418 struct target *target = get_target(targetname);
6419 LOG_DEBUG("%s ", targetname);
6420 if (!target)
6421 return NULL;
6423 struct target_list *new = malloc(sizeof(struct target_list));
6424 if (!new) {
6425 LOG_ERROR("Out of memory");
6426 return new;
6429 new->target = target;
6430 return new;
6433 static int get_target_with_common_rtos_type(struct list_head *lh, struct target **result)
6435 struct target *target = NULL;
6436 struct target_list *curr;
6437 foreach_smp_target(curr, lh) {
6438 struct rtos *curr_rtos = curr->target->rtos;
6439 if (curr_rtos) {
6440 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6441 LOG_ERROR("Different rtos types in members of one smp target!");
6442 return JIM_ERR;
6444 target = curr->target;
6447 *result = target;
6448 return JIM_OK;
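/*
 * Handler for "target smp targetname1 targetname2 ...": builds a shared
 * list of the named targets, tags every member with the same SMP group
 * number, and, if the members share a common RTOS type, initializes SMP
 * support in that RTOS. Illustrative invocation (names are examples only):
 *   target smp chip.cpu0 chip.cpu1
 */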
6451 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6453 static int smp_group = 1;
6455 if (argc == 1) {
6456 LOG_DEBUG("Empty SMP target");
6457 return JIM_OK;
6459 LOG_DEBUG("%d", argc);
6460 /* argv[1] = target to associate in smp
6461 * argv[2] = target to associate in smp
6462 * argv[3] ...
6465 struct list_head *lh = malloc(sizeof(*lh));
6466 if (!lh) {
6467 LOG_ERROR("Out of memory");
6468 return JIM_ERR;
6470 INIT_LIST_HEAD(lh);
6472 for (int i = 1; i < argc; i++) {
6473 struct target_list *new = create_target_list_node(argv[i]);
6474 if (new)
6475 list_add_tail(&new->lh, lh);
6477 /* now walk the list of CPUs and put each target in SMP mode */
6478 struct target_list *curr;
6479 foreach_smp_target(curr, lh) {
6480 struct target *target = curr->target;
6481 target->smp = smp_group;
6482 target->smp_targets = lh;
6484 smp_group++;
6486 struct target *rtos_target;
6487 int retval = get_target_with_common_rtos_type(lh, &rtos_target);
6488 if (retval == JIM_OK && rtos_target)
6489 retval = rtos_smp_init(rtos_target);
6491 return retval;
6495 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6497 struct jim_getopt_info goi;
6498 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6499 if (goi.argc < 3) {
6500 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6501 "<name> <target_type> [<target_options> ...]");
6502 return JIM_ERR;
6504 return target_create(&goi);
6507 static const struct command_registration target_subcommand_handlers[] = {
6509 .name = "init",
6510 .mode = COMMAND_CONFIG,
6511 .handler = handle_target_init_command,
6512 .help = "initialize targets",
6513 .usage = "",
6516 .name = "create",
6517 .mode = COMMAND_CONFIG,
6518 .jim_handler = jim_target_create,
6519 .usage = "name type '-chain-position' name [options ...]",
6520 .help = "Creates and selects a new target",
6523 .name = "current",
6524 .mode = COMMAND_ANY,
6525 .jim_handler = jim_target_current,
6526 .help = "Returns the currently selected target",
6529 .name = "types",
6530 .mode = COMMAND_ANY,
6531 .jim_handler = jim_target_types,
6532 .help = "Returns the available target types as "
6533 "a list of strings",
6536 .name = "names",
6537 .mode = COMMAND_ANY,
6538 .jim_handler = jim_target_names,
6539 .help = "Returns the names of all targets as a list of strings",
6542 .name = "smp",
6543 .mode = COMMAND_ANY,
6544 .jim_handler = jim_target_smp,
6545 .usage = "targetname1 targetname2 ...",
6546 .help = "gather several target in a smp list"
6549 COMMAND_REGISTRATION_DONE
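/*
 * The subcommands above are reached through the top-level "target" command
 * registered further down in target_command_handlers, e.g. (illustrative):
 *   target types      ;# list supported target types
 *   target names      ;# list configured targets
 *   target current    ;# name of the currently selected target
 */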
6552 struct fast_load {
6553 target_addr_t address;
6554 uint8_t *data;
6555 int length;
6559 static int fastload_num;
6560 static struct fast_load *fastload;
6562 static void free_fastload(void)
6564 if (fastload) {
6565 for (int i = 0; i < fastload_num; i++)
6566 free(fastload[i].data);
6567 free(fastload);
6568 fastload = NULL;
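/*
 * "fast_load_image" parses an image on the host and caches its sections in
 * the fastload array above; "fast_load" later writes that cached data to
 * the current target. Illustrative sequence (filename is an example only):
 *   fast_load_image firmware.elf 0x0 elf
 *   fast_load
 */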
6572 COMMAND_HANDLER(handle_fast_load_image_command)
6574 uint8_t *buffer;
6575 size_t buf_cnt;
6576 uint32_t image_size;
6577 target_addr_t min_address = 0;
6578 target_addr_t max_address = -1;
6580 struct image image;
6582 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6583 &image, &min_address, &max_address);
6584 if (retval != ERROR_OK)
6585 return retval;
6587 struct duration bench;
6588 duration_start(&bench);
6590 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6591 if (retval != ERROR_OK)
6592 return retval;
6594 image_size = 0x0;
6595 retval = ERROR_OK;
6596 fastload_num = image.num_sections;
6597 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6598 if (!fastload) {
6599 command_print(CMD, "out of memory");
6600 image_close(&image);
6601 return ERROR_FAIL;
6603 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6604 for (unsigned int i = 0; i < image.num_sections; i++) {
6605 buffer = malloc(image.sections[i].size);
6606 if (!buffer) {
6607 command_print(CMD, "error allocating buffer for section (%d bytes)",
6608 (int)(image.sections[i].size));
6609 retval = ERROR_FAIL;
6610 break;
6613 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6614 if (retval != ERROR_OK) {
6615 free(buffer);
6616 break;
6619 uint32_t offset = 0;
6620 uint32_t length = buf_cnt;
6622 /* DANGER!!! beware of unsigned comparison here!!! */
6624 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6625 (image.sections[i].base_address < max_address)) {
6626 if (image.sections[i].base_address < min_address) {
6627 /* clip addresses below */
6628 offset += min_address-image.sections[i].base_address;
6629 length -= offset;
6632 if (image.sections[i].base_address + buf_cnt > max_address)
6633 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6635 fastload[i].address = image.sections[i].base_address + offset;
6636 fastload[i].data = malloc(length);
6637 if (!fastload[i].data) {
6638 free(buffer);
6639 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6640 length);
6641 retval = ERROR_FAIL;
6642 break;
6644 memcpy(fastload[i].data, buffer + offset, length);
6645 fastload[i].length = length;
6647 image_size += length;
6648 command_print(CMD, "%u bytes written at address 0x%8.8x",
6649 (unsigned int)length,
6650 ((unsigned int)(image.sections[i].base_address + offset)));
6653 free(buffer);
6656 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6657 command_print(CMD, "Loaded %" PRIu32 " bytes "
6658 "in %fs (%0.3f KiB/s)", image_size,
6659 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6661 command_print(CMD,
6662 "WARNING: image has not been loaded to target!"
6663 "You can issue a 'fast_load' to finish loading.");
6666 image_close(&image);
6668 if (retval != ERROR_OK)
6669 free_fastload();
6671 return retval;
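/*
 * Writes the sections cached by "fast_load_image" to the current target and
 * reports the overall transfer rate; fails if no image has been cached.
 */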
6674 COMMAND_HANDLER(handle_fast_load_command)
6676 if (CMD_ARGC > 0)
6677 return ERROR_COMMAND_SYNTAX_ERROR;
6678 if (!fastload) {
6679 LOG_ERROR("No image in memory");
6680 return ERROR_FAIL;
6682 int i;
6683 int64_t ms = timeval_ms();
6684 int size = 0;
6685 int retval = ERROR_OK;
6686 for (i = 0; i < fastload_num; i++) {
6687 struct target *target = get_current_target(CMD_CTX);
6688 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6689 (unsigned int)(fastload[i].address),
6690 (unsigned int)(fastload[i].length));
6691 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6692 if (retval != ERROR_OK)
6693 break;
6694 size += fastload[i].length;
6696 if (retval == ERROR_OK) {
6697 int64_t after = timeval_ms();
6698 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6700 return retval;
6703 static const struct command_registration target_command_handlers[] = {
6705 .name = "targets",
6706 .handler = handle_targets_command,
6707 .mode = COMMAND_ANY,
6708 .help = "change current default target (one parameter) "
6709 "or prints table of all targets (no parameters)",
6710 .usage = "[target]",
6713 .name = "target",
6714 .mode = COMMAND_CONFIG,
6715 .help = "configure target",
6716 .chain = target_subcommand_handlers,
6717 .usage = "",
6719 COMMAND_REGISTRATION_DONE
6722 int target_register_commands(struct command_context *cmd_ctx)
6724 return register_commands(cmd_ctx, NULL, target_command_handlers);
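/*
 * Note the split between the two registration entry points in this file:
 * target_register_commands() above installs the configuration-stage
 * "targets"/"target" commands, while target_register_user_commands() at the
 * end of the file installs the run-time command set from
 * target_exec_command_handlers.
 */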
6727 static bool target_reset_nag = true;
6729 bool get_target_reset_nag(void)
6731 return target_reset_nag;
6734 COMMAND_HANDLER(handle_target_reset_nag)
6736 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6737 &target_reset_nag, "Nag after each reset about options to improve "
6738 "performance");
6741 COMMAND_HANDLER(handle_ps_command)
6743 struct target *target = get_current_target(CMD_CTX);
6744 char *display;
6745 if (target->state != TARGET_HALTED) {
6746 LOG_INFO("target not halted");
6747 return ERROR_OK;
6750 if ((target->rtos) && (target->rtos->type)
6751 && (target->rtos->type->ps_command)) {
6752 display = target->rtos->type->ps_command(target);
6753 command_print(CMD, "%s", display);
6754 free(display);
6755 return ERROR_OK;
6756 } else {
6757 LOG_INFO("failed");
6758 return ERROR_TARGET_FAILURE;
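/* Prints an optional label followed by the buffer as space-separated hex bytes on one line. */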
6762 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6764 if (text)
6765 command_print_sameline(cmd, "%s", text);
6766 for (int i = 0; i < size; i++)
6767 command_print_sameline(cmd, " %02x", buf[i]);
6768 command_print(cmd, " ");
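/*
 * "test_mem_access <size>": exercises target_read_memory() and
 * target_write_memory() with 1/2/4-byte accesses at varying target offsets
 * and host buffer alignments, comparing the results against a random test
 * pattern replayed on the host and reporting throughput for each case.
 * Illustrative invocation: test_mem_access 1024
 */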
6771 COMMAND_HANDLER(handle_test_mem_access_command)
6773 struct target *target = get_current_target(CMD_CTX);
6774 uint32_t test_size;
6775 int retval = ERROR_OK;
6777 if (target->state != TARGET_HALTED) {
6778 LOG_INFO("target not halted");
6779 return ERROR_FAIL;
6782 if (CMD_ARGC != 1)
6783 return ERROR_COMMAND_SYNTAX_ERROR;
6785 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6787 /* Test reads */
6788 size_t num_bytes = test_size + 4;
6790 struct working_area *wa = NULL;
6791 retval = target_alloc_working_area(target, num_bytes, &wa);
6792 if (retval != ERROR_OK) {
6793 LOG_ERROR("Not enough working area");
6794 return ERROR_FAIL;
6797 uint8_t *test_pattern = malloc(num_bytes);
6799 for (size_t i = 0; i < num_bytes; i++)
6800 test_pattern[i] = rand();
6802 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6803 if (retval != ERROR_OK) {
6804 LOG_ERROR("Test pattern write failed");
6805 goto out;
6808 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6809 for (int size = 1; size <= 4; size *= 2) {
6810 for (int offset = 0; offset < 4; offset++) {
6811 uint32_t count = test_size / size;
6812 size_t host_bufsiz = (count + 2) * size + host_offset;
6813 uint8_t *read_ref = malloc(host_bufsiz);
6814 uint8_t *read_buf = malloc(host_bufsiz);
6816 for (size_t i = 0; i < host_bufsiz; i++) {
6817 read_ref[i] = rand();
6818 read_buf[i] = read_ref[i];
6820 command_print_sameline(CMD,
6821 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6822 size, offset, host_offset ? "un" : "");
6824 struct duration bench;
6825 duration_start(&bench);
6827 retval = target_read_memory(target, wa->address + offset, size, count,
6828 read_buf + size + host_offset);
6830 duration_measure(&bench);
6832 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6833 command_print(CMD, "Unsupported alignment");
6834 goto next;
6835 } else if (retval != ERROR_OK) {
6836 command_print(CMD, "Memory read failed");
6837 goto next;
6840 /* replay on host */
6841 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6843 /* check result */
6844 int result = memcmp(read_ref, read_buf, host_bufsiz);
6845 if (result == 0) {
6846 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6847 duration_elapsed(&bench),
6848 duration_kbps(&bench, count * size));
6849 } else {
6850 command_print(CMD, "Compare failed");
6851 binprint(CMD, "ref:", read_ref, host_bufsiz);
6852 binprint(CMD, "buf:", read_buf, host_bufsiz);
6854 next:
6855 free(read_ref);
6856 free(read_buf);
6861 out:
6862 free(test_pattern);
6864 target_free_working_area(target, wa);
6866 /* Test writes */
6867 num_bytes = test_size + 4 + 4 + 4;
6869 retval = target_alloc_working_area(target, num_bytes, &wa);
6870 if (retval != ERROR_OK) {
6871 LOG_ERROR("Not enough working area");
6872 return ERROR_FAIL;
6875 test_pattern = malloc(num_bytes);
6877 for (size_t i = 0; i < num_bytes; i++)
6878 test_pattern[i] = rand();
6880 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6881 for (int size = 1; size <= 4; size *= 2) {
6882 for (int offset = 0; offset < 4; offset++) {
6883 uint32_t count = test_size / size;
6884 size_t host_bufsiz = count * size + host_offset;
6885 uint8_t *read_ref = malloc(num_bytes);
6886 uint8_t *read_buf = malloc(num_bytes);
6887 uint8_t *write_buf = malloc(host_bufsiz);
6889 for (size_t i = 0; i < host_bufsiz; i++)
6890 write_buf[i] = rand();
6891 command_print_sameline(CMD,
6892 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6893 size, offset, host_offset ? "un" : "");
6895 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6896 if (retval != ERROR_OK) {
6897 command_print(CMD, "Test pattern write failed");
6898 goto nextw;
6901 /* replay on host */
6902 memcpy(read_ref, test_pattern, num_bytes);
6903 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6905 struct duration bench;
6906 duration_start(&bench);
6908 retval = target_write_memory(target, wa->address + size + offset, size, count,
6909 write_buf + host_offset);
6911 duration_measure(&bench);
6913 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6914 command_print(CMD, "Unsupported alignment");
6915 goto nextw;
6916 } else if (retval != ERROR_OK) {
6917 command_print(CMD, "Memory write failed");
6918 goto nextw;
6921 /* read back */
6922 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6923 if (retval != ERROR_OK) {
6924 command_print(CMD, "Test pattern write failed");
6925 goto nextw;
6928 /* check result */
6929 int result = memcmp(read_ref, read_buf, num_bytes);
6930 if (result == 0) {
6931 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6932 duration_elapsed(&bench),
6933 duration_kbps(&bench, count * size));
6934 } else {
6935 command_print(CMD, "Compare failed");
6936 binprint(CMD, "ref:", read_ref, num_bytes);
6937 binprint(CMD, "buf:", read_buf, num_bytes);
6939 nextw:
6940 free(read_ref);
6941 free(read_buf);
6946 free(test_pattern);
6948 target_free_working_area(target, wa);
6949 return retval;
6952 static const struct command_registration target_exec_command_handlers[] = {
6954 .name = "fast_load_image",
6955 .handler = handle_fast_load_image_command,
6956 .mode = COMMAND_ANY,
6957 .help = "Load image into server memory for later use by "
6958 "fast_load; primarily for profiling",
6959 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6960 "[min_address [max_length]]",
6963 .name = "fast_load",
6964 .handler = handle_fast_load_command,
6965 .mode = COMMAND_EXEC,
6966 .help = "loads active fast load image to current target "
6967 "- mainly for profiling purposes",
6968 .usage = "",
6971 .name = "profile",
6972 .handler = handle_profile_command,
6973 .mode = COMMAND_EXEC,
6974 .usage = "seconds filename [start end]",
6975 .help = "profiling samples the CPU PC",
6977 /** @todo don't register virt2phys() unless target supports it */
6979 .name = "virt2phys",
6980 .handler = handle_virt2phys_command,
6981 .mode = COMMAND_ANY,
6982 .help = "translate a virtual address into a physical address",
6983 .usage = "virtual_address",
6986 .name = "reg",
6987 .handler = handle_reg_command,
6988 .mode = COMMAND_EXEC,
6989 .help = "display (reread from target with \"force\") or set a register; "
6990 "with no arguments, displays all registers and their values",
6991 .usage = "[(register_number|register_name) [(value|'force')]]",
6994 .name = "poll",
6995 .handler = handle_poll_command,
6996 .mode = COMMAND_EXEC,
6997 .help = "poll target state; or reconfigure background polling",
6998 .usage = "['on'|'off']",
7001 .name = "wait_halt",
7002 .handler = handle_wait_halt_command,
7003 .mode = COMMAND_EXEC,
7004 .help = "wait up to the specified number of milliseconds "
7005 "(default 5000) for a previously requested halt",
7006 .usage = "[milliseconds]",
7009 .name = "halt",
7010 .handler = handle_halt_command,
7011 .mode = COMMAND_EXEC,
7012 .help = "request target to halt, then wait up to the specified "
7013 "number of milliseconds (default 5000) for it to complete",
7014 .usage = "[milliseconds]",
7017 .name = "resume",
7018 .handler = handle_resume_command,
7019 .mode = COMMAND_EXEC,
7020 .help = "resume target execution from current PC or address",
7021 .usage = "[address]",
7024 .name = "reset",
7025 .handler = handle_reset_command,
7026 .mode = COMMAND_EXEC,
7027 .usage = "[run|halt|init]",
7028 .help = "Reset all targets into the specified mode. "
7029 "Default reset mode is run, if not given.",
7032 .name = "soft_reset_halt",
7033 .handler = handle_soft_reset_halt_command,
7034 .mode = COMMAND_EXEC,
7035 .usage = "",
7036 .help = "halt the target and do a soft reset",
7039 .name = "step",
7040 .handler = handle_step_command,
7041 .mode = COMMAND_EXEC,
7042 .help = "step one instruction from current PC or address",
7043 .usage = "[address]",
7046 .name = "mdd",
7047 .handler = handle_md_command,
7048 .mode = COMMAND_EXEC,
7049 .help = "display memory double-words",
7050 .usage = "['phys'] address [count]",
7053 .name = "mdw",
7054 .handler = handle_md_command,
7055 .mode = COMMAND_EXEC,
7056 .help = "display memory words",
7057 .usage = "['phys'] address [count]",
7060 .name = "mdh",
7061 .handler = handle_md_command,
7062 .mode = COMMAND_EXEC,
7063 .help = "display memory half-words",
7064 .usage = "['phys'] address [count]",
7067 .name = "mdb",
7068 .handler = handle_md_command,
7069 .mode = COMMAND_EXEC,
7070 .help = "display memory bytes",
7071 .usage = "['phys'] address [count]",
7074 .name = "mwd",
7075 .handler = handle_mw_command,
7076 .mode = COMMAND_EXEC,
7077 .help = "write memory double-word",
7078 .usage = "['phys'] address value [count]",
7081 .name = "mww",
7082 .handler = handle_mw_command,
7083 .mode = COMMAND_EXEC,
7084 .help = "write memory word",
7085 .usage = "['phys'] address value [count]",
7088 .name = "mwh",
7089 .handler = handle_mw_command,
7090 .mode = COMMAND_EXEC,
7091 .help = "write memory half-word",
7092 .usage = "['phys'] address value [count]",
7095 .name = "mwb",
7096 .handler = handle_mw_command,
7097 .mode = COMMAND_EXEC,
7098 .help = "write memory byte",
7099 .usage = "['phys'] address value [count]",
7102 .name = "bp",
7103 .handler = handle_bp_command,
7104 .mode = COMMAND_EXEC,
7105 .help = "list or set hardware or software breakpoint",
7106 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7109 .name = "rbp",
7110 .handler = handle_rbp_command,
7111 .mode = COMMAND_EXEC,
7112 .help = "remove breakpoint",
7113 .usage = "'all' | address",
7116 .name = "wp",
7117 .handler = handle_wp_command,
7118 .mode = COMMAND_EXEC,
7119 .help = "list (no params) or create watchpoints",
7120 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7123 .name = "rwp",
7124 .handler = handle_rwp_command,
7125 .mode = COMMAND_EXEC,
7126 .help = "remove watchpoint",
7127 .usage = "address",
7130 .name = "load_image",
7131 .handler = handle_load_image_command,
7132 .mode = COMMAND_EXEC,
7133 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7134 "[min_address] [max_length]",
7137 .name = "dump_image",
7138 .handler = handle_dump_image_command,
7139 .mode = COMMAND_EXEC,
7140 .usage = "filename address size",
7143 .name = "verify_image_checksum",
7144 .handler = handle_verify_image_checksum_command,
7145 .mode = COMMAND_EXEC,
7146 .usage = "filename [offset [type]]",
7149 .name = "verify_image",
7150 .handler = handle_verify_image_command,
7151 .mode = COMMAND_EXEC,
7152 .usage = "filename [offset [type]]",
7155 .name = "test_image",
7156 .handler = handle_test_image_command,
7157 .mode = COMMAND_EXEC,
7158 .usage = "filename [offset [type]]",
7161 .name = "get_reg",
7162 .mode = COMMAND_EXEC,
7163 .jim_handler = target_jim_get_reg,
7164 .help = "Get register values from the target",
7165 .usage = "list",
7168 .name = "set_reg",
7169 .mode = COMMAND_EXEC,
7170 .jim_handler = target_jim_set_reg,
7171 .help = "Set target register values",
7172 .usage = "dict",
7175 .name = "read_memory",
7176 .mode = COMMAND_EXEC,
7177 .handler = handle_target_read_memory,
7178 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7179 .usage = "address width count ['phys']",
7182 .name = "write_memory",
7183 .mode = COMMAND_EXEC,
7184 .jim_handler = target_jim_write_memory,
7185 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7186 .usage = "address width data ['phys']",
7189 .name = "reset_nag",
7190 .handler = handle_target_reset_nag,
7191 .mode = COMMAND_ANY,
7192 .help = "Nag after each reset about options that could have been "
7193 "enabled to improve performance.",
7194 .usage = "['enable'|'disable']",
7197 .name = "ps",
7198 .handler = handle_ps_command,
7199 .mode = COMMAND_EXEC,
7200 .help = "list all tasks",
7201 .usage = "",
7204 .name = "test_mem_access",
7205 .handler = handle_test_mem_access_command,
7206 .mode = COMMAND_EXEC,
7207 .help = "Test the target's memory access functions",
7208 .usage = "size",
7211 COMMAND_REGISTRATION_DONE
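/*
 * Registers the run-time command set: target_request and trace commands
 * first, then the handlers in target_exec_command_handlers above.
 */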
7213 static int target_register_user_commands(struct command_context *cmd_ctx)
7215 int retval = ERROR_OK;
7216 retval = target_request_register_commands(cmd_ctx);
7217 if (retval != ERROR_OK)
7218 return retval;
7220 retval = trace_register_commands(cmd_ctx);
7221 if (retval != ERROR_OK)
7222 return retval;
7225 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);