target/tcl: Add get_reg function
[openocd.git] / src / target / target.c
blobb72dc53e3e4f57f87d811dadaea944cc0a1df2a2
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 static int64_t target_timer_next_event_value;
160 static LIST_HEAD(target_reset_callback_list);
161 static LIST_HEAD(target_trace_callback_list);
162 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
163 static LIST_HEAD(empty_smp_targets);
165 static const struct jim_nvp nvp_assert[] = {
166 { .name = "assert", NVP_ASSERT },
167 { .name = "deassert", NVP_DEASSERT },
168 { .name = "T", NVP_ASSERT },
169 { .name = "F", NVP_DEASSERT },
170 { .name = "t", NVP_ASSERT },
171 { .name = "f", NVP_DEASSERT },
172 { .name = NULL, .value = -1 }
175 static const struct jim_nvp nvp_error_target[] = {
176 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
177 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
178 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
179 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
180 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
181 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
182 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
183 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
184 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
185 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
186 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
187 { .value = -1, .name = NULL }
190 static const char *target_strerror_safe(int err)
192 const struct jim_nvp *n;
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
201 static const struct jim_nvp nvp_target_event[] = {
203 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
204 { .value = TARGET_EVENT_HALTED, .name = "halted" },
205 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
206 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
207 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
208 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
209 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
211 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
212 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
214 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
215 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
216 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
217 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
218 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
220 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
221 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
223 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
224 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
225 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
227 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
228 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
230 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
231 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
233 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
236 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
239 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
250 { .name = NULL, .value = -1 }
253 static const struct jim_nvp nvp_target_state[] = {
254 { .name = "unknown", .value = TARGET_UNKNOWN },
255 { .name = "running", .value = TARGET_RUNNING },
256 { .name = "halted", .value = TARGET_HALTED },
257 { .name = "reset", .value = TARGET_RESET },
258 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
259 { .name = NULL, .value = -1 },
262 static const struct jim_nvp nvp_target_debug_reason[] = {
263 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
264 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
265 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
266 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
267 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
268 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
269 { .name = "program-exit", .value = DBG_REASON_EXIT },
270 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
271 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
272 { .name = NULL, .value = -1 },
275 static const struct jim_nvp nvp_target_endian[] = {
276 { .name = "big", .value = TARGET_BIG_ENDIAN },
277 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
278 { .name = "be", .value = TARGET_BIG_ENDIAN },
279 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
280 { .name = NULL, .value = -1 },
283 static const struct jim_nvp nvp_reset_modes[] = {
284 { .name = "unknown", .value = RESET_UNKNOWN },
285 { .name = "run", .value = RESET_RUN },
286 { .name = "halt", .value = RESET_HALT },
287 { .name = "init", .value = RESET_INIT },
288 { .name = NULL, .value = -1 },
291 const char *debug_reason_name(struct target *t)
293 const char *cp;
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
301 return cp;
304 const char *target_state_name(struct target *t)
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
316 return cp;
319 const char *target_event_name(enum target_event event)
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
327 return cp;
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
338 return cp;
341 /* determine the number of the new target */
342 static int new_target_number(void)
344 struct target *t;
345 int x;
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
355 return x + 1;
358 static void append_to_list_all_targets(struct target *target)
360 struct target **t = &all_targets;
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
439 /* write a uint8_t to a buffer in target memory endianness */
440 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
442 *buffer = value;
445 /* write a uint64_t array to a buffer in target memory endianness */
446 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
453 /* write a uint32_t array to a buffer in target memory endianness */
454 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
461 /* write a uint16_t array to a buffer in target memory endianness */
462 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
469 /* write a uint64_t array to a buffer in target memory endianness */
470 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
477 /* write a uint32_t array to a buffer in target memory endianness */
478 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
485 /* write a uint16_t array to a buffer in target memory endianness */
486 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
488 uint32_t i;
489 for (i = 0; i < count; i++)
490 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
496 struct target *target;
498 /* try as tcltarget name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
506 /* It's OK to remove this fallback sometime after August 2010 or so */
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
521 return NULL;
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
527 struct target *target = all_targets;
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
535 return NULL;
538 struct target *get_current_target(struct command_context *cmd_ctx)
540 struct target *target = get_current_target_or_null(cmd_ctx);
542 if (!target) {
543 LOG_ERROR("BUG: current_target out of bounds");
544 exit(-1);
547 return target;
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
557 int target_poll(struct target *target)
559 int retval;
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 /* Fail silently lest we pollute the log */
564 return ERROR_FAIL;
567 retval = target->type->poll(target);
568 if (retval != ERROR_OK)
569 return retval;
571 if (target->halt_issued) {
572 if (target->state == TARGET_HALTED)
573 target->halt_issued = false;
574 else {
575 int64_t t = timeval_ms() - target->halt_issued_time;
576 if (t > DEFAULT_HALT_TIMEOUT) {
577 target->halt_issued = false;
578 LOG_INFO("Halt timed out, wake up GDB.");
579 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
584 return ERROR_OK;
587 int target_halt(struct target *target)
589 int retval;
590 /* We can't poll until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
603 return ERROR_OK;
607 * Make the target (re)start executing using its saved execution
608 * context (possibly with some modifications).
610 * @param target Which target should start executing.
611 * @param current True to use the target's saved program counter instead
612 * of the address parameter
613 * @param address Optionally used as the program counter.
614 * @param handle_breakpoints True iff breakpoints at the resumption PC
615 * should be skipped. (For example, maybe execution was stopped by
616 * such a breakpoint, in which case it would be counterproductive to
617 * let it re-trigger.
618 * @param debug_execution False if all working areas allocated by OpenOCD
619 * should be released and/or restored to their original contents.
620 * (This would for example be true to run some downloaded "helper"
621 * algorithm code, which resides in one such working buffer and uses
622 * another for data storage.)
624 * @todo Resolve the ambiguity about what the "debug_execution" flag
625 * signifies. For example, Target implementations don't agree on how
626 * it relates to invalidation of the register cache, or to whether
627 * breakpoints and watchpoints should be enabled. (It would seem wrong
628 * to enable breakpoints when running downloaded "helper" algorithms
629 * (debug_execution true), since the breakpoints would be set to match
630 * target firmware being debugged, not the helper algorithm.... and
631 * enabling them could cause such helpers to malfunction (for example,
632 * by overwriting data with a breakpoint instruction. On the other
633 * hand the infrastructure for running such helpers might use this
634 * procedure but rely on hardware breakpoint to detect termination.)
636 int target_resume(struct target *target, int current, target_addr_t address,
637 int handle_breakpoints, int debug_execution)
639 int retval;
641 /* We can't poll until after examine */
642 if (!target_was_examined(target)) {
643 LOG_ERROR("Target not examined yet");
644 return ERROR_FAIL;
647 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
649 /* note that resume *must* be asynchronous. The CPU can halt before
650 * we poll. The CPU can even halt at the current PC as a result of
651 * a software breakpoint being inserted by (a bug?) the application.
654 * resume() triggers the event 'resumed'. The execution of TCL commands
655 * in the event handler causes the polling of targets. If the target has
656 * already halted for a breakpoint, polling will run the 'halted' event
657 * handler before the pending 'resumed' handler.
658 * Disable polling during resume() to guarantee the execution of handlers
659 * in the correct order.
661 bool save_poll = jtag_poll_get_enabled();
662 jtag_poll_set_enabled(false);
663 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
664 jtag_poll_set_enabled(save_poll);
665 if (retval != ERROR_OK)
666 return retval;
668 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
670 return retval;
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
692 bool save_poll = jtag_poll_get_enabled();
694 jtag_poll_set_enabled(false);
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
699 jtag_poll_set_enabled(save_poll);
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
715 return retval;
718 static int identity_virt2phys(struct target *target,
719 target_addr_t virtual, target_addr_t *physical)
721 *physical = virtual;
722 return ERROR_OK;
725 static int no_mmu(struct target *target, int *enabled)
727 *enabled = 0;
728 return ERROR_OK;
732 * Reset the @c examined flag for the given target.
733 * Pure paranoia -- targets are zeroed on allocation.
735 static inline void target_reset_examined(struct target *target)
737 target->examined = false;
740 static int default_examine(struct target *target)
742 target_set_examined(target);
743 return ERROR_OK;
746 /* no check by default */
747 static int default_check_reset(struct target *target)
749 return ERROR_OK;
752 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
753 * Keep in sync */
754 int target_examine_one(struct target *target)
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
768 return ERROR_OK;
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
773 struct target *target = priv;
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
778 jtag_unregister_event_callback(jtag_enable_callback, target);
780 return target_examine_one(target);
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
786 * XScale
788 int target_examine(void)
790 int retval = ERROR_OK;
791 struct target *target;
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
801 if (target->defer_examine)
802 continue;
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
810 return retval;
813 const char *target_type_name(struct target *target)
815 return target->type->name;
818 static int target_soft_reset_halt(struct target *target)
820 if (!target_was_examined(target)) {
821 LOG_ERROR("Target not examined yet");
822 return ERROR_FAIL;
824 if (!target->type->soft_reset_halt) {
825 LOG_ERROR("Target %s does not support soft_reset_halt",
826 target_name(target));
827 return ERROR_FAIL;
829 return target->type->soft_reset_halt(target);
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. * Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or * soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_param
845 * @param entry_point
846 * @param exit_point
847 * @param timeout_ms
848 * @param arch_info target-specific description of the algorithm.
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
856 int retval = ERROR_FAIL;
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
875 done:
876 return retval;
880 * Executes a target-specific native code algorithm and leaves it running.
882 * @param target used to run the algorithm
883 * @param num_mem_params
884 * @param mem_params
885 * @param num_reg_params
886 * @param reg_params
887 * @param entry_point
888 * @param exit_point
889 * @param arch_info target-specific description of the algorithm.
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
897 int retval = ERROR_FAIL;
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
919 done:
920 return retval;
924 * Waits for an algorithm started with target_start_algorithm() to complete.
926 * @param target used to run the algorithm
927 * @param num_mem_params
928 * @param mem_params
929 * @param num_reg_params
930 * @param reg_params
931 * @param exit_point
932 * @param timeout_ms
933 * @param arch_info target-specific description of the algorithm.
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
941 int retval = ERROR_FAIL;
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
960 done:
961 return retval;
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
989 * See contrib/loaders/flash/stm32f1x.S for an example.
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
998 * @param reg_params memory-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1014 int retval;
1015 int timeout = 0;
1017 const uint8_t *buffer_orig = buffer;
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1051 while (count > 0) {
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1091 /* to stop an infinite loop on some targets check and increment a timeout
1092 * this issue was observed on a stellaris using the new ICDI interface */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1097 continue;
1100 /* reset our timeout */
1101 timeout = 0;
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1157 return retval;
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1167 int retval;
1168 int timeout = 0;
1170 const uint8_t *buffer_orig = buffer;
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1240 /* to stop an infinite loop on some targets check and increment a timeout
1241 * this issue was observed on a stellaris using the new ICDI interface */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1246 continue;
1249 /* Reset our timeout */
1250 timeout = 0;
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1265 /* Update counters and wrap write pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1272 /* Store updated write pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1282 if (retval != ERROR_OK) {
1283 /* abort flash write algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash write algorithm");
1295 retval = retval2;
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after fifo writer loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1307 return retval;
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1321 return target->type->read_memory(target, address, size, count, buffer);
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1349 return target->type->write_memory(target, address, size, count, buffer);
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1373 return target->type->add_breakpoint(target, breakpoint);
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1383 return target->type->add_context_breakpoint(target, breakpoint);
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1396 int target_remove_breakpoint(struct target *target,
1397 struct breakpoint *breakpoint)
1399 return target->type->remove_breakpoint(target, breakpoint);
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1409 return target->type->add_watchpoint(target, watchpoint);
1411 int target_remove_watchpoint(struct target *target,
1412 struct watchpoint *watchpoint)
1414 return target->type->remove_watchpoint(target, watchpoint);
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatible, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL such that gdb_server will not take the nonsense
1427 * information. */
1428 return ERROR_FAIL;
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1434 const char *target_get_gdb_arch(struct target *target)
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1445 int result = ERROR_FAIL;
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1460 return result;
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1474 bool target_supports_gdb_connection(struct target *target)
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1486 int retval;
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1496 return retval;
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1517 target_addr_t target_address_max(struct target *target)
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1526 unsigned target_address_bits(struct target *target)
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1533 unsigned int target_data_bits(struct target *target)
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1547 static int handle_target(void *priv);
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1552 target_reset_examined(target);
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1561 assert(type->init_target);
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1606 return ERROR_OK;
1609 static int target_init(struct command_context *cmd_ctx)
1611 struct target *target;
1612 int retval;
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1620 if (!all_targets)
1621 return ERROR_OK;
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1632 return ERROR_OK;
1635 COMMAND_HANDLER(handle_target_init_command)
1637 int retval;
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1647 target_initialized = true;
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1684 return ERROR_OK;
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1690 struct target_reset_callback *entry;
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1706 return ERROR_OK;
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1712 struct target_trace_callback *entry;
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1728 return ERROR_OK;
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1757 return ERROR_OK;
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1780 return ERROR_OK;
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1786 struct target_reset_callback *entry;
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1799 return ERROR_OK;
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1805 struct target_trace_callback *entry;
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1818 return ERROR_OK;
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1834 return ERROR_FAIL;
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* deliver the GDB halt notification before the regular halted callbacks */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1851 target_handle_event(target, event);
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1859 return ERROR_OK;
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1864 struct target_reset_callback *callback;
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1872 return ERROR_OK;
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1877 struct target_trace_callback *callback;
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1882 return ERROR_OK;
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1895 cb->callback(cb->priv);
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1903 static int target_call_timer_callbacks_check_time(int checktime)
1905 static bool callback_processing;
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1911 callback_processing = true;
1913 keep_alive();
1915 int64_t now = timeval_ms();
1917 /* Initialize to a default value that's well into the future.
1918 * The loop below will make it closer to now if there are
1919 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1922 /* Store the address of the location containing a pointer to the
1923 * next item; initially, that's the standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1944 callback = &(*callback)->next;
1947 callback_processing = false;
1948 return ERROR_OK;
1951 int target_call_timer_callbacks()
1953 return target_call_timer_callbacks_check_time(1);
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now()
1959 return target_call_timer_callbacks_check_time(0);
1962 int64_t target_timer_next_event(void)
1964 return target_timer_next_event_value;
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1970 struct working_area *c = target->working_areas;
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1990 if (!new_wa)
1991 return;
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
2000 area->next = new_wa;
2001 area->size = size;
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now, so free it; it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2013 struct working_area *c = target->working_areas;
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2029 /* If backup memory was allocated to the remaining area, it has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2041 /* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2085 target->working_areas = new_wa;
2088 /* only allocate in multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2092 struct working_area *c = target->working_areas;
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2126 /* user pointer */
2127 c->user = area;
2129 print_wa_layout(target);
2131 return ERROR_OK;
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2136 int retval;
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
2141 return retval;
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2147 int retval = ERROR_OK;
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2156 return retval;
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2162 if (!area || area->free)
2163 return ERROR_OK;
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2173 area->free = true;
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2185 target_merge_working_areas(target);
2187 print_wa_layout(target);
2189 return retval;
2192 int target_free_working_area(struct target *target, struct working_area *area)
2194 return target_free_working_area_restore(target, area, 1);
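/*
 * Illustrative sketch, not part of the build: the usual allocate/use/free
 * pattern for the working area helpers above. The 256 byte size is arbitrary.
 *
 *	struct working_area *wa = NULL;
 *	if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	... download and run code at wa->address ...
 *	target_free_working_area(target, wa);
 *
 * The allocator records a pointer back to 'wa' (the c->user field) and
 * freeing the area clears it, so 'wa' must stay in scope until the area is
 * released.
 */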
2197 /* Free resources and restore memory; if restoring memory fails,
2198 * free up the resources anyway
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2202 struct working_area *c = target->working_areas;
2204 LOG_DEBUG("freeing all working areas");
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2215 c = c->next;
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2221 print_wa_layout(target);
2224 void target_free_all_working_areas(struct target *target)
2226 target_free_all_working_areas_restore(target, 1);
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2243 if (!c)
2244 return target->working_area_size;
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2250 c = c->next;
2253 return max_size;
2256 static void target_destroy(struct target *target)
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2261 free(target->semihosting);
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2273 target_free_all_working_areas(target);
2275 /* release the target's SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2289 rtos_destroy(target);
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2299 void target_quit(void)
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2307 target_event_callbacks = NULL;
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2315 target_timer_callbacks = NULL;
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2325 all_targets = NULL;
2328 int target_arch_state(struct target *target)
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2339 retval = target->type->arch_state(target);
2340 return retval;
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2346 /* If the target does not support semihosting functions, it
2347 has no need to provide a .get_gdb_fileio_info callback.
2348 It just returns ERROR_FAIL, and gdb_server will report "Txx"
2349 (target halted) every time. */
2350 return ERROR_FAIL;
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2356 return ERROR_OK;
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2362 struct timeval timeout, now;
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target not halted or running");
2389 retval = ERROR_OK;
2390 break;
2393 if (retval != ERROR_OK)
2394 break;
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2403 *num_samples = sample_count;
2404 return retval;
2407 /* Single aligned half-words and words are guaranteed to use 16 or 32 bit
2408 * accesses respectively; otherwise data is handled as quickly as
2409 * possible
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2421 if (size == 0)
2422 return ERROR_OK;
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2432 return target->type->write_buffer(target, address, size, buffer);
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2441 /* Align up to maximum bytes. The loop condition makes sure the next pass
2442 * will have something to do with the size we leave to it. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2469 return ERROR_OK;
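/*
 * Worked example of the alignment strategy above, for illustration only:
 * with 32-bit data accesses (data_bytes == 4), address 0x1001 and count 11,
 * the function issues
 *   a 1-byte write at 0x1001,
 *   a 2-byte write at 0x1002,
 *   two 4-byte writes at 0x1004,
 * which leaves count == 0, so the smaller tail passes do nothing.
 */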
2472 /* Single aligned half-words and words are guaranteed to use 16 or 32 bit
2473 * accesses respectively; otherwise data is handled as quickly as
2474 * possible
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2486 if (size == 0)
2487 return ERROR_OK;
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2497 return target->type->read_buffer(target, address, size, buffer);
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2505 /* Align up to maximum bytes. The loop condition makes sure the next pass
2506 * will have something to do with the size we leave to it. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2533 return ERROR_OK;
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_COMMAND_SYNTAX_ERROR;
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2575 *crc = checksum;
2577 return retval;
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2616 return retval;
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2640 return retval;
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2664 return retval;
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2674 int retval = target_read_memory(target, address, 1, 1, value);
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2686 return retval;
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2707 return retval;
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2728 return retval;
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2741 address,
2742 value);
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2749 return retval;
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2767 return retval;
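/*
 * Illustrative sketch, not part of the build: setting a bit in a memory
 * mapped register with the helpers above. The 0x20000000 address is
 * arbitrary.
 *
 *	uint32_t value;
 *	int retval = target_read_u32(target, 0x20000000, &value);
 *	if (retval == ERROR_OK)
 *		retval = target_write_u32(target, 0x20000000, value | 0x1);
 *
 * The target_buffer_get/set helpers convert between host order and the
 * target's endianness, so callers always work with host-order integers.
 */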
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2788 return retval;
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2809 return retval;
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2822 address,
2823 value);
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2830 return retval;
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2848 return retval;
2851 static int find_target(struct command_invocation *cmd, const char *name)
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2869 return ERROR_OK;
2873 COMMAND_HANDLER(handle_targets_command)
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2913 return retval;
2916 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2918 static int power_dropout;
2919 static int srst_asserted;
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2926 static int sense_handler(void)
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2975 return ERROR_OK;
2978 /* process target state changes */
2979 static int handle_target(void *priv)
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3023 /* clear action flags */
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3030 recursive = 0;
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3040 if (!target_was_examined(target))
3041 continue;
3043 if (!target->tap->enabled)
3044 continue;
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3051 target->backoff.count = 0;
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3083 /* Since we succeeded, reset the backoff count */
3084 target->backoff.times = 0;
3088 return retval;
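/*
 * Worked example of the backoff above, for illustration: with the 100 ms
 * polling interval, backoff.times grows 0 -> 1 -> 3 -> 7 -> 15 -> 31 -> 63 on
 * consecutive poll failures and stops growing once times * polling_interval
 * reaches the 5000 ms cap; backoff.count then skips that many polls before
 * the next attempt.
 */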
3091 COMMAND_HANDLER(handle_reg_command)
3093 LOG_DEBUG("-");
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3106 command_print(CMD, "===== %s", cache->name);
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (reg->exist == false || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3131 cache = cache->next;
3134 return ERROR_OK;
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3152 if (reg)
3153 break;
3154 cache = cache->next;
3157 if (!reg) {
3158 command_print(CMD, "%i is out of bounds, the current target "
3159 "has only %i registers (0 - %i)", num, count, count - 1);
3160 return ERROR_OK;
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3166 if (!reg)
3167 goto not_found;
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3172 if (!reg->exist)
3173 goto not_found;
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {
3203 LOG_ERROR("Could not write to register '%s'", reg->name);
3204 } else {
3205 char *value = buf_to_hex_str(reg->value, reg->size);
3206 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3207 free(value);
3210 free(buf);
3212 return retval;
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3217 not_found:
3218 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3219 return ERROR_OK;
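/*
 * Example invocations of the handler above, for illustration: "reg" lists
 * every register in every cache, "reg pc" prints the program counter
 * (reading it if the cached value is invalid), "reg pc force" discards the
 * cached value before reading, and "reg pc 0x08000400" writes a new value
 * through reg->type->set().
 */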
3222 COMMAND_HANDLER(handle_poll_command)
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3248 return retval;
3251 COMMAND_HANDLER(handle_wait_halt_command)
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (1 == CMD_ARGC) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3271 * After 500ms, keep_alive() is invoked
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3293 if (cur-then > 500)
3294 keep_alive();
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3303 return ERROR_OK;
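/*
 * Illustrative sketch, not part of the build: halting a target and waiting
 * up to the default timeout for the halted state, much as the halt and
 * wait_halt command handlers below do.
 *
 *	int retval = target_halt(target);
 *	if (retval == ERROR_OK)
 *		retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */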
3306 COMMAND_HANDLER(handle_halt_command)
3308 LOG_DEBUG("-");
3310 struct target *target = get_current_target(CMD_CTX);
3312 target->verbose_halt_msg = true;
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3332 struct target *target = get_current_target(CMD_CTX);
3334 LOG_USER("requesting target halt and executing a soft reset");
3336 target_soft_reset_halt(target);
3338 return ERROR_OK;
3341 COMMAND_HANDLER(handle_reset_command)
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3360 COMMAND_HANDLER(handle_resume_command)
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3366 struct target *target = get_current_target(CMD_CTX);
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 * with one argument, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3377 return target_resume(target, current, addr, 1, 0);
3380 COMMAND_HANDLER(handle_step_command)
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3385 LOG_DEBUG("-");
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument, addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3397 struct target *target = get_current_target(CMD_CTX);
3399 return target_step(target, current_pc, addr, 1);
3402 void target_handle_md_output(struct command_invocation *cmd,
3403 struct target *target, target_addr_t address, unsigned size,
3404 unsigned count, const uint8_t *buffer)
3406 const unsigned line_bytecnt = 32;
3407 unsigned line_modulo = line_bytecnt / size;
3409 char output[line_bytecnt * 4 + 1];
3410 unsigned output_len = 0;
3412 const char *value_fmt;
3413 switch (size) {
3414 case 8:
3415 value_fmt = "%16.16"PRIx64" ";
3416 break;
3417 case 4:
3418 value_fmt = "%8.8"PRIx64" ";
3419 break;
3420 case 2:
3421 value_fmt = "%4.4"PRIx64" ";
3422 break;
3423 case 1:
3424 value_fmt = "%2.2"PRIx64" ";
3425 break;
3426 default:
3427 /* "can't happen", caller checked */
3428 LOG_ERROR("invalid memory read size: %u", size);
3429 return;
3432 for (unsigned i = 0; i < count; i++) {
3433 if (i % line_modulo == 0) {
3434 output_len += snprintf(output + output_len,
3435 sizeof(output) - output_len,
3436 TARGET_ADDR_FMT ": ",
3437 (address + (i * size)));
3440 uint64_t value = 0;
3441 const uint8_t *value_ptr = buffer + i * size;
3442 switch (size) {
3443 case 8:
3444 value = target_buffer_get_u64(target, value_ptr);
3445 break;
3446 case 4:
3447 value = target_buffer_get_u32(target, value_ptr);
3448 break;
3449 case 2:
3450 value = target_buffer_get_u16(target, value_ptr);
3451 break;
3452 case 1:
3453 value = *value_ptr;
3455 output_len += snprintf(output + output_len,
3456 sizeof(output) - output_len,
3457 value_fmt, value);
3459 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3460 command_print(cmd, "%s", output);
3461 output_len = 0;
3466 COMMAND_HANDLER(handle_md_command)
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3519 free(buffer);
3521 return retval;
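/*
 * Example invocations of the handler above, for illustration:
 * "mdw 0x20000000 4" reads four 32-bit words and prints them via
 * target_handle_md_output(), while "mdw phys 0x20000000 4" reads from the
 * physical address space via target_read_phys_memory().
 */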
3524 typedef int (*target_write_fn)(struct target *target,
3525 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3564 int retval = ERROR_OK;
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3577 free(target_buf);
3579 return retval;
3583 COMMAND_HANDLER(handle_mw_command)
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
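/*
 * Example invocations of the handler above, for illustration:
 * "mww 0x20000000 0xdeadbeef 4" fills four 32-bit words with the value, and
 * "mwb phys 0x20000000 0xff" writes one byte through
 * target_write_phys_memory(). Large counts are written in chunks of up to
 * 16384 elements by target_fill_mem() to keep the speed sane.
 */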
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3646 image->start_address_set = false;
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3659 return ERROR_OK;
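/*
 * Example invocation, for illustration: "load_image firmware.bin 0x08000000
 * bin" loads an image file at base address 0x08000000; the optional 4th and
 * 5th arguments give a minimum address and a size, which this helper turns
 * into the [min_address, max_address) window used below to clip what gets
 * written.
 */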
3662 COMMAND_HANDLER(handle_load_image_command)
3664 uint8_t *buffer;
3665 size_t buf_cnt;
3666 uint32_t image_size;
3667 target_addr_t min_address = 0;
3668 target_addr_t max_address = -1;
3669 struct image image;
3671 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3672 &image, &min_address, &max_address);
3673 if (retval != ERROR_OK)
3674 return retval;
3676 struct target *target = get_current_target(CMD_CTX);
3678 struct duration bench;
3679 duration_start(&bench);
3681 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3682 return ERROR_FAIL;
3684 image_size = 0x0;
3685 retval = ERROR_OK;
3686 for (unsigned int i = 0; i < image.num_sections; i++) {
3687 buffer = malloc(image.sections[i].size);
3688 if (!buffer) {
3689 command_print(CMD,
3690 "error allocating buffer for section (%d bytes)",
3691 (int)(image.sections[i].size));
3692 retval = ERROR_FAIL;
3693 break;
3696 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3697 if (retval != ERROR_OK) {
3698 free(buffer);
3699 break;
3702 uint32_t offset = 0;
3703 uint32_t length = buf_cnt;
3705 /* DANGER!!! beware of unsigned comparison here!!! */
3707 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3708 (image.sections[i].base_address < max_address)) {
3710 if (image.sections[i].base_address < min_address) {
3711 /* clip addresses below */
3712 offset += min_address-image.sections[i].base_address;
3713 length -= offset;
3716 if (image.sections[i].base_address + buf_cnt > max_address)
3717 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3719 retval = target_write_buffer(target,
3720 image.sections[i].base_address + offset, length, buffer + offset);
3721 if (retval != ERROR_OK) {
3722 free(buffer);
3723 break;
3725 image_size += length;
3726 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3727 (unsigned int)length,
3728 image.sections[i].base_address + offset);
3731 free(buffer);
3734 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3735 command_print(CMD, "downloaded %" PRIu32 " bytes "
3736 "in %fs (%0.3f KiB/s)", image_size,
3737 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3740 image_close(&image);
3742 return retval;
3746 COMMAND_HANDLER(handle_dump_image_command)
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3772 duration_start(&bench);
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3785 size -= this_run_size;
3786 address += this_run_size;
3789 free(buffer);
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK)
3795 return retval;
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3805 return retval;
3808 enum verify_mode {
3809 IMAGE_TEST = 0,
3810 IMAGE_VERIFY = 1,
3811 IMAGE_CHECKSUM_ONLY = 2
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3823 struct image image;
3825 struct target *target = get_current_target(CMD_CTX);
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3835 struct duration bench;
3836 duration_start(&bench);
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3848 image.start_address_set = false;
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
3863 break;
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3897 data = malloc(buf_cnt);
3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3917 keep_alive();
3920 free(data);
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3928 free(buffer);
3929 image_size += buf_cnt;
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3942 image_close(&image);
3944 return retval;
3947 COMMAND_HANDLER(handle_verify_image_checksum_command)
3949 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3952 COMMAND_HANDLER(handle_verify_image_command)
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3957 COMMAND_HANDLER(handle_test_image_command)
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 breakpoint->set, buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->set);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->set);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->set);
3992 breakpoint = breakpoint->next;
3994 return ERROR_OK;
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4029 return retval;
4032 COMMAND_HANDLER(handle_bp_command)
4034 target_addr_t addr;
4035 uint32_t asid;
4036 uint32_t length;
4037 int hw = BKPT_SOFT;
4039 switch (CMD_ARGC) {
4040 case 0:
4041 return handle_bp_command_list(CMD);
4043 case 2:
4044 asid = 0;
4045 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4047 return handle_bp_command_set(CMD, addr, asid, length, hw);
4049 case 3:
4050 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 asid = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4056 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4057 hw = BKPT_HARD;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4060 addr = 0;
4061 return handle_bp_command_set(CMD, addr, asid, length, hw);
4063 /* fallthrough */
4064 case 4:
4065 hw = BKPT_HARD;
4066 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4069 return handle_bp_command_set(CMD, addr, asid, length, hw);
4071 default:
4072 return ERROR_COMMAND_SYNTAX_ERROR;
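/* Illustrative Tcl usage of the 'bp' command handled above; addresses, lengths and
 * ASIDs are arbitrary example values:
 *
 *   bp                       ;# list all breakpoints
 *   bp 0x08000100 2          ;# software breakpoint, 2 bytes
 *   bp 0x08000100 2 hw       ;# hardware breakpoint
 *   bp 0x22 4 hw_ctx         ;# context breakpoint on ASID 0x22
 *   bp 0x08000100 0x22 4     ;# hybrid (address + ASID) hardware breakpoint
 *
 * Remove breakpoints with 'rbp <address>' or 'rbp all' (handle_rbp_command below).
 */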
4076 COMMAND_HANDLER(handle_rbp_command)
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4081 struct target *target = get_current_target(CMD_CTX);
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4089 breakpoint_remove(target, addr);
4092 return ERROR_OK;
4095 COMMAND_HANDLER(handle_wp_command)
4097 struct target *target = get_current_target(CMD_CTX);
4099 if (CMD_ARGC == 0) {
4100 struct watchpoint *watchpoint = target->watchpoints;
4102 while (watchpoint) {
4103 command_print(CMD, "address: " TARGET_ADDR_FMT
4104 ", len: 0x%8.8" PRIx32
4105 ", r/w/a: %i, value: 0x%8.8" PRIx32
4106 ", mask: 0x%8.8" PRIx32,
4107 watchpoint->address,
4108 watchpoint->length,
4109 (int)watchpoint->rw,
4110 watchpoint->value,
4111 watchpoint->mask);
4112 watchpoint = watchpoint->next;
4114 return ERROR_OK;
4117 enum watchpoint_rw type = WPT_ACCESS;
4118 target_addr_t addr = 0;
4119 uint32_t length = 0;
4120 uint32_t data_value = 0x0;
4121 uint32_t data_mask = 0xffffffff;
4123 switch (CMD_ARGC) {
4124 case 5:
4125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4126 /* fall through */
4127 case 4:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4129 /* fall through */
4130 case 3:
4131 switch (CMD_ARGV[2][0]) {
4132 case 'r':
4133 type = WPT_READ;
4134 break;
4135 case 'w':
4136 type = WPT_WRITE;
4137 break;
4138 case 'a':
4139 type = WPT_ACCESS;
4140 break;
4141 default:
4142 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4145 /* fall through */
4146 case 2:
4147 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149 break;
4151 default:
4152 return ERROR_COMMAND_SYNTAX_ERROR;
4155 int retval = watchpoint_add(target, addr, length, type,
4156 data_value, data_mask);
4157 if (retval != ERROR_OK)
4158 LOG_ERROR("Failed to set watchpoint");
4160 return retval;
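/* Illustrative Tcl usage of the 'wp' command handled above; the address, length,
 * data value and mask are arbitrary example values:
 *
 *   wp                                ;# list all watchpoints
 *   wp 0x20000400 4                   ;# access watchpoint, 4 bytes
 *   wp 0x20000400 4 w                 ;# write watchpoint ('r' read, 'a' access)
 *   wp 0x20000400 4 a 0x1234 0xffff   ;# match value 0x1234 under mask 0xffff
 *
 * Remove watchpoints with 'rwp <address>' (handle_rwp_command below).
 */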
4163 COMMAND_HANDLER(handle_rwp_command)
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4174 return ERROR_OK;
4178 * Translate a virtual address to a physical address.
4180 * The low-level target implementation must have logged a detailed error
4181 * which is forwarded to the telnet/GDB session.
4183 COMMAND_HANDLER(handle_virt2phys_command)
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4197 return retval;
4200 static void write_data(FILE *f, const void *data, size_t len)
4202 size_t written = fwrite(data, 1, len, f);
4203 if (written != len)
4204 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4207 static void write_long(FILE *f, int l, struct target *target)
4209 uint8_t val[4];
4211 target_buffer_set_u32(target, val, l);
4212 write_data(f, val, 4);
4215 static void write_string(FILE *f, const char *s)
4217 write_data(f, s, strlen(s));
4220 typedef unsigned char UNIT[2]; /* unit of profiling */
4222 /* Dump a gmon.out histogram file. */
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4226 uint32_t i;
4227 FILE *f = fopen(filename, "w");
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4278 if ((address < min) || (max <= address))
4279 continue;
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4285 buckets[index_t]++;
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4299 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val & 0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4317 fclose(f);
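/* Sketch of the gmon.out layout produced by write_gmon() above (the record layout
 * follows the binutils gprof gmon format):
 *
 *   "gmon" magic, version 1, three padding words      (file header)
 *   tag byte 0 (GMON_TAG_TIME_HIST)
 *   low_pc, high_pc, bucket count, sampling rate      (histogram header)
 *   "seconds" padded to 15 bytes, dimension abbreviation 's'
 *   bucket counts as 16-bit values in target byte order, saturated at 65535
 */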
4320 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
4321 * which serves as a random sampling of the PC. */
4322 COMMAND_HANDLER(handle_profile_command)
4324 struct target *target = get_current_target(CMD_CTX);
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4343 uint64_t timestart_ms = timeval_ms();
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4402 free(samples);
4403 return retval;
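/* Illustrative Tcl usage of the 'profile' command handled above. The first argument is
 * the number forwarded to target_profiling() (conventionally the sampling duration in
 * seconds), the second is the gmon.out file to write, and the optional pair restricts
 * the histogram to an address range; all values below are arbitrary examples:
 *
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08020000
 *
 * The output can be post-processed with gprof against the matching ELF,
 * e.g. 'gprof firmware.elf gmon.out'.
 */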
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4434 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4436 struct command_context *context;
4437 struct target *target;
4439 context = current_command_context(interp);
4440 assert(context);
4442 target = get_current_target(context);
4443 if (!target) {
4444 LOG_ERROR("mem2array: no current target");
4445 return JIM_ERR;
4448 return target_mem2array(interp, target, argc - 1, argv + 1);
4451 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4453 int e;
4455 /* argv[0] = name of array to receive the data
4456 * argv[1] = desired element width in bits
4457 * argv[2] = memory address
4458 * argv[3] = count of times to read
4459 * argv[4] = optional "phys"
4461 if (argc < 4 || argc > 5) {
4462 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4463 return JIM_ERR;
4466 /* Arg 0: Name of the array variable */
4467 const char *varname = Jim_GetString(argv[0], NULL);
4469 /* Arg 1: Bit width of one element */
4470 long l;
4471 e = Jim_GetLong(interp, argv[1], &l);
4472 if (e != JIM_OK)
4473 return e;
4474 const unsigned int width_bits = l;
4476 if (width_bits != 8 &&
4477 width_bits != 16 &&
4478 width_bits != 32 &&
4479 width_bits != 64) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4483 return JIM_ERR;
4485 const unsigned int width = width_bits / 8;
4487 /* Arg 2: Memory address */
4488 jim_wide wide_addr;
4489 e = Jim_GetWide(interp, argv[2], &wide_addr);
4490 if (e != JIM_OK)
4491 return e;
4492 target_addr_t addr = (target_addr_t)wide_addr;
4494 /* Arg 3: Number of elements to read */
4495 e = Jim_GetLong(interp, argv[3], &l);
4496 if (e != JIM_OK)
4497 return e;
4498 size_t len = l;
4500 /* Arg 4: phys */
4501 bool is_phys = false;
4502 if (argc > 4) {
4503 int str_len = 0;
4504 const char *phys = Jim_GetString(argv[4], &str_len);
4505 if (!strncmp(phys, "phys", str_len))
4506 is_phys = true;
4507 else
4508 return JIM_ERR;
4511 /* Argument checks */
4512 if (len == 0) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count?", NULL);
4515 return JIM_ERR;
4517 if ((addr + (len * width)) < addr) {
4518 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4519 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps around to zero?", NULL);
4520 return JIM_ERR;
4522 if (len > 65536) {
4523 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4524 Jim_AppendStrings(interp, Jim_GetResult(interp),
4525 "mem2array: too large read request, exceeds 64K items", NULL);
4526 return JIM_ERR;
4529 if ((width == 1) ||
4530 ((width == 2) && ((addr & 1) == 0)) ||
4531 ((width == 4) && ((addr & 3) == 0)) ||
4532 ((width == 8) && ((addr & 7) == 0))) {
4533 /* alignment correct */
4534 } else {
4535 char buf[100];
4536 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4537 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4538 addr,
4539 width);
4540 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4541 return JIM_ERR;
4544 /* Transfer loop */
4546 /* index counter */
4547 size_t idx = 0;
4549 const size_t buffersize = 4096;
4550 uint8_t *buffer = malloc(buffersize);
4551 if (!buffer)
4552 return JIM_ERR;
4554 /* assume ok */
4555 e = JIM_OK;
4556 while (len) {
4557 /* Slurp... in buffer size chunks */
4558 const unsigned int max_chunk_len = buffersize / width;
4559 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4561 int retval;
4562 if (is_phys)
4563 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4564 else
4565 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4566 if (retval != ERROR_OK) {
4567 /* BOO !*/
4568 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4569 addr,
4570 width,
4571 chunk_len);
4572 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4573 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4574 e = JIM_ERR;
4575 break;
4576 } else {
4577 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4578 uint64_t v = 0;
4579 switch (width) {
4580 case 8:
4581 v = target_buffer_get_u64(target, &buffer[i*width]);
4582 break;
4583 case 4:
4584 v = target_buffer_get_u32(target, &buffer[i*width]);
4585 break;
4586 case 2:
4587 v = target_buffer_get_u16(target, &buffer[i*width]);
4588 break;
4589 case 1:
4590 v = buffer[i] & 0x0ff;
4591 break;
4593 new_u64_array_element(interp, varname, idx, v);
4595 len -= chunk_len;
4596 addr += chunk_len * width;
4600 free(buffer);
4602 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4604 return e;
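/* Illustrative Tcl usage of 'mem2array' as implemented above; the array name, address
 * and element count are arbitrary examples:
 *
 *   mem2array readback 32 0x20000000 16        ;# 16 x 32-bit reads
 *   mem2array readback 8  0x20000000 64 phys   ;# bypass address translation
 *   echo $readback(0)
 *
 * Elements land in readback(0) .. readback(N-1) as plain integer values.
 */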
4607 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4609 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4610 if (!namebuf)
4611 return JIM_ERR;
4613 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4614 if (!obj_name) {
4615 free(namebuf);
4616 return JIM_ERR;
4619 Jim_IncrRefCount(obj_name);
4620 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4621 Jim_DecrRefCount(interp, obj_name);
4622 free(namebuf);
4623 if (!obj_val)
4624 return JIM_ERR;
4626 jim_wide wide_val;
4627 int result = Jim_GetWide(interp, obj_val, &wide_val);
4628 *val = wide_val;
4629 return result;
4632 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4634 struct command_context *context;
4635 struct target *target;
4637 context = current_command_context(interp);
4638 assert(context);
4640 target = get_current_target(context);
4641 if (!target) {
4642 LOG_ERROR("array2mem: no current target");
4643 return JIM_ERR;
4646 return target_array2mem(interp, target, argc-1, argv + 1);
4649 static int target_array2mem(Jim_Interp *interp, struct target *target,
4650 int argc, Jim_Obj *const *argv)
4652 int e;
4654 /* argv[0] = name of array from which to read the data
4655 * argv[1] = desired element width in bits
4656 * argv[2] = memory address
4657 * argv[3] = number of elements to write
4658 * argv[4] = optional "phys"
4660 if (argc < 4 || argc > 5) {
4661 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4662 return JIM_ERR;
4665 /* Arg 0: Name of the array variable */
4666 const char *varname = Jim_GetString(argv[0], NULL);
4668 /* Arg 1: Bit width of one element */
4669 long l;
4670 e = Jim_GetLong(interp, argv[1], &l);
4671 if (e != JIM_OK)
4672 return e;
4673 const unsigned int width_bits = l;
4675 if (width_bits != 8 &&
4676 width_bits != 16 &&
4677 width_bits != 32 &&
4678 width_bits != 64) {
4679 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4680 Jim_AppendStrings(interp, Jim_GetResult(interp),
4681 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4682 return JIM_ERR;
4684 const unsigned int width = width_bits / 8;
4686 /* Arg 2: Memory address */
4687 jim_wide wide_addr;
4688 e = Jim_GetWide(interp, argv[2], &wide_addr);
4689 if (e != JIM_OK)
4690 return e;
4691 target_addr_t addr = (target_addr_t)wide_addr;
4693 /* Arg 3: Number of elements to write */
4694 e = Jim_GetLong(interp, argv[3], &l);
4695 if (e != JIM_OK)
4696 return e;
4697 size_t len = l;
4699 /* Arg 4: Phys */
4700 bool is_phys = false;
4701 if (argc > 4) {
4702 int str_len = 0;
4703 const char *phys = Jim_GetString(argv[4], &str_len);
4704 if (!strncmp(phys, "phys", str_len))
4705 is_phys = true;
4706 else
4707 return JIM_ERR;
4710 /* Argument checks */
4711 if (len == 0) {
4712 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4713 Jim_AppendStrings(interp, Jim_GetResult(interp),
4714 "array2mem: zero width read?", NULL);
4715 return JIM_ERR;
4718 if ((addr + (len * width)) < addr) {
4719 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4720 Jim_AppendStrings(interp, Jim_GetResult(interp),
4721 "array2mem: addr + len - wraps to zero?", NULL);
4722 return JIM_ERR;
4725 if (len > 65536) {
4726 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4727 Jim_AppendStrings(interp, Jim_GetResult(interp),
4728 "array2mem: too large memory write request, exceeds 64K items", NULL);
4729 return JIM_ERR;
4732 if ((width == 1) ||
4733 ((width == 2) && ((addr & 1) == 0)) ||
4734 ((width == 4) && ((addr & 3) == 0)) ||
4735 ((width == 8) && ((addr & 7) == 0))) {
4736 /* alignment correct */
4737 } else {
4738 char buf[100];
4739 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4740 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4741 addr,
4742 width);
4743 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4744 return JIM_ERR;
4747 /* Transfer loop */
4749 /* assume ok */
4750 e = JIM_OK;
4752 const size_t buffersize = 4096;
4753 uint8_t *buffer = malloc(buffersize);
4754 if (!buffer)
4755 return JIM_ERR;
4757 /* index counter */
4758 size_t idx = 0;
4760 while (len) {
4761 /* Slurp... in buffer size chunks */
4762 const unsigned int max_chunk_len = buffersize / width;
4764 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4766 /* Fill the buffer */
4767 for (size_t i = 0; i < chunk_len; i++, idx++) {
4768 uint64_t v = 0;
4769 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4770 free(buffer);
4771 return JIM_ERR;
4773 switch (width) {
4774 case 8:
4775 target_buffer_set_u64(target, &buffer[i * width], v);
4776 break;
4777 case 4:
4778 target_buffer_set_u32(target, &buffer[i * width], v);
4779 break;
4780 case 2:
4781 target_buffer_set_u16(target, &buffer[i * width], v);
4782 break;
4783 case 1:
4784 buffer[i] = v & 0x0ff;
4785 break;
4788 len -= chunk_len;
4790 /* Write the buffer to memory */
4791 int retval;
4792 if (is_phys)
4793 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4794 else
4795 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4796 if (retval != ERROR_OK) {
4797 /* BOO !*/
4798 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4799 addr,
4800 width,
4801 chunk_len);
4802 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4803 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4804 e = JIM_ERR;
4805 break;
4807 addr += chunk_len * width;
4810 free(buffer);
4812 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4814 return e;
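/* Illustrative Tcl usage of 'array2mem' as implemented above, mirroring mem2array in
 * the write direction; names and values are arbitrary examples:
 *
 *   set patch(0) 0xdeadbeef
 *   set patch(1) 0x00000000
 *   array2mem patch 32 0x20000000 2          ;# write patch(0)..patch(1)
 *   array2mem patch 32 0x20000000 2 phys     ;# same, via physical addresses
 */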
4817 /* FIX? should we propagate errors here rather than printing them
4818 * and continuing?
4820 void target_handle_event(struct target *target, enum target_event e)
4822 struct target_event_action *teap;
4823 int retval;
4825 for (teap = target->event_action; teap; teap = teap->next) {
4826 if (teap->event == e) {
4827 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4828 target->target_number,
4829 target_name(target),
4830 target_type_name(target),
4832 target_event_name(e),
4833 Jim_GetString(teap->body, NULL));
4835 /* Override the current target with the target the event
4836 * is issued from (many scripts rely on this).
4837 * Restore the previous override as soon
4838 * as the handler processing is done. */
4839 struct command_context *cmd_ctx = current_command_context(teap->interp);
4840 struct target *saved_target_override = cmd_ctx->current_target_override;
4841 cmd_ctx->current_target_override = target;
4843 retval = Jim_EvalObj(teap->interp, teap->body);
4845 cmd_ctx->current_target_override = saved_target_override;
4847 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4848 return;
4850 if (retval == JIM_RETURN)
4851 retval = teap->interp->returnCode;
4853 if (retval != JIM_OK) {
4854 Jim_MakeErrorMessage(teap->interp);
4855 LOG_USER("Error executing event %s on target %s:\n%s",
4856 target_event_name(e),
4857 target_name(target),
4858 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4859 /* clean both error code and stacktrace before return */
4860 Jim_Eval(teap->interp, "error \"\" \"\"");
4866 static int target_jim_get_reg(Jim_Interp *interp, int argc,
4867 Jim_Obj * const *argv)
4869 bool force = false;
4871 if (argc == 3) {
4872 const char *option = Jim_GetString(argv[1], NULL);
4874 if (!strcmp(option, "-force")) {
4875 argc--;
4876 argv++;
4877 force = true;
4878 } else {
4879 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
4880 return JIM_ERR;
4884 if (argc != 2) {
4885 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
4886 return JIM_ERR;
4889 const int length = Jim_ListLength(interp, argv[1]);
4891 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
4893 if (!result_dict)
4894 return JIM_ERR;
4896 struct command_context *cmd_ctx = current_command_context(interp);
4897 assert(cmd_ctx);
4898 const struct target *target = get_current_target(cmd_ctx);
4900 for (int i = 0; i < length; i++) {
4901 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
4903 if (!elem)
4904 return JIM_ERR;
4906 const char *reg_name = Jim_String(elem);
4908 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
4909 false);
4911 if (!reg || !reg->exist) {
4912 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
4913 return JIM_ERR;
4916 if (force) {
4917 int retval = reg->type->get(reg);
4919 if (retval != ERROR_OK) {
4920 Jim_SetResultFormatted(interp, "failed to read register '%s'",
4921 reg_name);
4922 return JIM_ERR;
4926 char *reg_value = buf_to_hex_str(reg->value, reg->size);
4928 if (!reg_value) {
4929 LOG_ERROR("Failed to allocate memory");
4930 return JIM_ERR;
4933 char *tmp = alloc_printf("0x%s", reg_value);
4935 free(reg_value);
4937 if (!tmp) {
4938 LOG_ERROR("Failed to allocate memory");
4939 return JIM_ERR;
4942 Jim_DictAddElement(interp, result_dict, elem,
4943 Jim_NewStringObj(interp, tmp, -1));
4945 free(tmp);
4948 Jim_SetResult(interp, result_dict);
4950 return JIM_OK;
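/* Illustrative Tcl usage of the new 'get_reg' command implemented above. It takes a Tcl
 * list of register names and returns a dict of name/value pairs; with -force the values
 * are re-read from the target instead of the register cache. Register names and the
 * returned values below are arbitrary examples:
 *
 *   get_reg {pc sp}              ;# e.g. returns: pc 0x08000a30 sp 0x20004000
 *   get_reg -force {r0 r1}
 *   dict get [get_reg {pc}] pc
 */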
4953 static int target_jim_set_reg(Jim_Interp *interp, int argc,
4954 Jim_Obj * const *argv)
4956 if (argc != 2) {
4957 Jim_WrongNumArgs(interp, 1, argv, "dict");
4958 return JIM_ERR;
4961 int tmp;
4962 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
4964 if (!dict)
4965 return JIM_ERR;
4967 const unsigned int length = tmp;
4968 struct command_context *cmd_ctx = current_command_context(interp);
4969 assert(cmd_ctx);
4970 const struct target *target = get_current_target(cmd_ctx);
4972 for (unsigned int i = 0; i < length; i += 2) {
4973 const char *reg_name = Jim_String(dict[i]);
4974 const char *reg_value = Jim_String(dict[i + 1]);
4975 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
4976 false);
4978 if (!reg || !reg->exist) {
4979 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
4980 return JIM_ERR;
4983 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
4985 if (!buf) {
4986 LOG_ERROR("Failed to allocate memory");
4987 return JIM_ERR;
4990 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
4991 int retval = reg->type->set(reg, buf);
4992 free(buf);
4994 if (retval != ERROR_OK) {
4995 Jim_SetResultFormatted(interp, "failed to set register '%s' to '%s'",
4996 reg_name, reg_value);
4997 return JIM_ERR;
5001 return JIM_OK;
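/* Illustrative Tcl usage of 'set_reg' implemented above. It takes a dict of register
 * name/value pairs; names and values are arbitrary examples:
 *
 *   set_reg {pc 0x08000000 sp 0x20008000}
 *   set_reg {r0 0x01}
 */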
5005 * Returns true only if the target has a handler for the specified event.
5007 bool target_has_event_action(struct target *target, enum target_event event)
5009 struct target_event_action *teap;
5011 for (teap = target->event_action; teap; teap = teap->next) {
5012 if (teap->event == event)
5013 return true;
5015 return false;
5018 enum target_cfg_param {
5019 TCFG_TYPE,
5020 TCFG_EVENT,
5021 TCFG_WORK_AREA_VIRT,
5022 TCFG_WORK_AREA_PHYS,
5023 TCFG_WORK_AREA_SIZE,
5024 TCFG_WORK_AREA_BACKUP,
5025 TCFG_ENDIAN,
5026 TCFG_COREID,
5027 TCFG_CHAIN_POSITION,
5028 TCFG_DBGBASE,
5029 TCFG_RTOS,
5030 TCFG_DEFER_EXAMINE,
5031 TCFG_GDB_PORT,
5032 TCFG_GDB_MAX_CONNECTIONS,
5035 static struct jim_nvp nvp_config_opts[] = {
5036 { .name = "-type", .value = TCFG_TYPE },
5037 { .name = "-event", .value = TCFG_EVENT },
5038 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5039 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5040 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5041 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5042 { .name = "-endian", .value = TCFG_ENDIAN },
5043 { .name = "-coreid", .value = TCFG_COREID },
5044 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5045 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5046 { .name = "-rtos", .value = TCFG_RTOS },
5047 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5048 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5049 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5050 { .name = NULL, .value = -1 }
5053 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5055 struct jim_nvp *n;
5056 Jim_Obj *o;
5057 jim_wide w;
5058 int e;
5060 /* parse config or cget options ... */
5061 while (goi->argc > 0) {
5062 Jim_SetEmptyResult(goi->interp);
5063 /* jim_getopt_debug(goi); */
5065 if (target->type->target_jim_configure) {
5066 /* target defines a configure function */
5067 /* target gets first dibs on parameters */
5068 e = (*(target->type->target_jim_configure))(target, goi);
5069 if (e == JIM_OK) {
5070 /* more? */
5071 continue;
5073 if (e == JIM_ERR) {
5074 /* An error */
5075 return e;
5077 /* otherwise we 'continue' below */
5079 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5080 if (e != JIM_OK) {
5081 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5082 return e;
5084 switch (n->value) {
5085 case TCFG_TYPE:
5086 /* not settable */
5087 if (goi->isconfigure) {
5088 Jim_SetResultFormatted(goi->interp,
5089 "not settable: %s", n->name);
5090 return JIM_ERR;
5091 } else {
5092 no_params:
5093 if (goi->argc != 0) {
5094 Jim_WrongNumArgs(goi->interp,
5095 goi->argc, goi->argv,
5096 "NO PARAMS");
5097 return JIM_ERR;
5100 Jim_SetResultString(goi->interp,
5101 target_type_name(target), -1);
5102 /* loop for more */
5103 break;
5104 case TCFG_EVENT:
5105 if (goi->argc == 0) {
5106 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5107 return JIM_ERR;
5110 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5111 if (e != JIM_OK) {
5112 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5113 return e;
5116 if (goi->isconfigure) {
5117 if (goi->argc != 1) {
5118 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5119 return JIM_ERR;
5121 } else {
5122 if (goi->argc != 0) {
5123 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5124 return JIM_ERR;
5129 struct target_event_action *teap;
5131 teap = target->event_action;
5132 /* replace existing? */
5133 while (teap) {
5134 if (teap->event == (enum target_event)n->value)
5135 break;
5136 teap = teap->next;
5139 if (goi->isconfigure) {
5140 /* START_DEPRECATED_TPIU */
5141 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5142 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5143 /* END_DEPRECATED_TPIU */
5145 bool replace = true;
5146 if (!teap) {
5147 /* create new */
5148 teap = calloc(1, sizeof(*teap));
5149 replace = false;
5151 teap->event = n->value;
5152 teap->interp = goi->interp;
5153 jim_getopt_obj(goi, &o);
5154 if (teap->body)
5155 Jim_DecrRefCount(teap->interp, teap->body);
5156 teap->body = Jim_DuplicateObj(goi->interp, o);
5158 * FIXME:
5159 * Tcl/TK - "tk events" have a nice feature.
5160 * See the "BIND" command.
5161 * We should support that here.
5162 * You can specify %X and %Y in the event code.
5163 * The idea is: %T - target name.
5164 * The idea is: %N - target number
5165 * The idea is: %E - event name.
5167 Jim_IncrRefCount(teap->body);
5169 if (!replace) {
5170 /* add to head of event list */
5171 teap->next = target->event_action;
5172 target->event_action = teap;
5174 Jim_SetEmptyResult(goi->interp);
5175 } else {
5176 /* get */
5177 if (!teap)
5178 Jim_SetEmptyResult(goi->interp);
5179 else
5180 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5183 /* loop for more */
5184 break;
5186 case TCFG_WORK_AREA_VIRT:
5187 if (goi->isconfigure) {
5188 target_free_all_working_areas(target);
5189 e = jim_getopt_wide(goi, &w);
5190 if (e != JIM_OK)
5191 return e;
5192 target->working_area_virt = w;
5193 target->working_area_virt_spec = true;
5194 } else {
5195 if (goi->argc != 0)
5196 goto no_params;
5198 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5199 /* loop for more */
5200 break;
5202 case TCFG_WORK_AREA_PHYS:
5203 if (goi->isconfigure) {
5204 target_free_all_working_areas(target);
5205 e = jim_getopt_wide(goi, &w);
5206 if (e != JIM_OK)
5207 return e;
5208 target->working_area_phys = w;
5209 target->working_area_phys_spec = true;
5210 } else {
5211 if (goi->argc != 0)
5212 goto no_params;
5214 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5215 /* loop for more */
5216 break;
5218 case TCFG_WORK_AREA_SIZE:
5219 if (goi->isconfigure) {
5220 target_free_all_working_areas(target);
5221 e = jim_getopt_wide(goi, &w);
5222 if (e != JIM_OK)
5223 return e;
5224 target->working_area_size = w;
5225 } else {
5226 if (goi->argc != 0)
5227 goto no_params;
5229 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5230 /* loop for more */
5231 break;
5233 case TCFG_WORK_AREA_BACKUP:
5234 if (goi->isconfigure) {
5235 target_free_all_working_areas(target);
5236 e = jim_getopt_wide(goi, &w);
5237 if (e != JIM_OK)
5238 return e;
5239 /* make this exactly 1 or 0 */
5240 target->backup_working_area = (!!w);
5241 } else {
5242 if (goi->argc != 0)
5243 goto no_params;
5245 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5246 /* loop for more e*/
5247 break;
5250 case TCFG_ENDIAN:
5251 if (goi->isconfigure) {
5252 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5253 if (e != JIM_OK) {
5254 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5255 return e;
5257 target->endianness = n->value;
5258 } else {
5259 if (goi->argc != 0)
5260 goto no_params;
5262 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5263 if (!n->name) {
5264 target->endianness = TARGET_LITTLE_ENDIAN;
5265 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5267 Jim_SetResultString(goi->interp, n->name, -1);
5268 /* loop for more */
5269 break;
5271 case TCFG_COREID:
5272 if (goi->isconfigure) {
5273 e = jim_getopt_wide(goi, &w);
5274 if (e != JIM_OK)
5275 return e;
5276 target->coreid = (int32_t)w;
5277 } else {
5278 if (goi->argc != 0)
5279 goto no_params;
5281 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5282 /* loop for more */
5283 break;
5285 case TCFG_CHAIN_POSITION:
5286 if (goi->isconfigure) {
5287 Jim_Obj *o_t;
5288 struct jtag_tap *tap;
5290 if (target->has_dap) {
5291 Jim_SetResultString(goi->interp,
5292 "target requires -dap parameter instead of -chain-position!", -1);
5293 return JIM_ERR;
5296 target_free_all_working_areas(target);
5297 e = jim_getopt_obj(goi, &o_t);
5298 if (e != JIM_OK)
5299 return e;
5300 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5301 if (!tap)
5302 return JIM_ERR;
5303 target->tap = tap;
5304 target->tap_configured = true;
5305 } else {
5306 if (goi->argc != 0)
5307 goto no_params;
5309 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5310 /* loop for more e*/
5311 break;
5312 case TCFG_DBGBASE:
5313 if (goi->isconfigure) {
5314 e = jim_getopt_wide(goi, &w);
5315 if (e != JIM_OK)
5316 return e;
5317 target->dbgbase = (uint32_t)w;
5318 target->dbgbase_set = true;
5319 } else {
5320 if (goi->argc != 0)
5321 goto no_params;
5323 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5324 /* loop for more */
5325 break;
5326 case TCFG_RTOS:
5327 /* RTOS */
5329 int result = rtos_create(goi, target);
5330 if (result != JIM_OK)
5331 return result;
5333 /* loop for more */
5334 break;
5336 case TCFG_DEFER_EXAMINE:
5337 /* DEFER_EXAMINE */
5338 target->defer_examine = true;
5339 /* loop for more */
5340 break;
5342 case TCFG_GDB_PORT:
5343 if (goi->isconfigure) {
5344 struct command_context *cmd_ctx = current_command_context(goi->interp);
5345 if (cmd_ctx->mode != COMMAND_CONFIG) {
5346 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5347 return JIM_ERR;
5350 const char *s;
5351 e = jim_getopt_string(goi, &s, NULL);
5352 if (e != JIM_OK)
5353 return e;
5354 free(target->gdb_port_override);
5355 target->gdb_port_override = strdup(s);
5356 } else {
5357 if (goi->argc != 0)
5358 goto no_params;
5360 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5361 /* loop for more */
5362 break;
5364 case TCFG_GDB_MAX_CONNECTIONS:
5365 if (goi->isconfigure) {
5366 struct command_context *cmd_ctx = current_command_context(goi->interp);
5367 if (cmd_ctx->mode != COMMAND_CONFIG) {
5368 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5369 return JIM_ERR;
5372 e = jim_getopt_wide(goi, &w);
5373 if (e != JIM_OK)
5374 return e;
5375 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5376 } else {
5377 if (goi->argc != 0)
5378 goto no_params;
5380 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5381 break;
5383 } /* while (goi->argc) */
5386 /* done - we return */
5387 return JIM_OK;
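/* Illustrative Tcl usage of the option table handled by target_configure() above, via
 * the per-target 'configure' and 'cget' sub-commands; the target name, sizes and event
 * body are arbitrary examples:
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -endian
 *   $_TARGETNAME cget -event reset-init
 */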
5390 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5392 struct command *c = jim_to_command(interp);
5393 struct jim_getopt_info goi;
5395 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5396 goi.isconfigure = !strcmp(c->name, "configure");
5397 if (goi.argc < 1) {
5398 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5399 "missing: -option ...");
5400 return JIM_ERR;
5402 struct command_context *cmd_ctx = current_command_context(interp);
5403 assert(cmd_ctx);
5404 struct target *target = get_current_target(cmd_ctx);
5405 return target_configure(&goi, target);
5408 static int jim_target_mem2array(Jim_Interp *interp,
5409 int argc, Jim_Obj *const *argv)
5411 struct command_context *cmd_ctx = current_command_context(interp);
5412 assert(cmd_ctx);
5413 struct target *target = get_current_target(cmd_ctx);
5414 return target_mem2array(interp, target, argc - 1, argv + 1);
5417 static int jim_target_array2mem(Jim_Interp *interp,
5418 int argc, Jim_Obj *const *argv)
5420 struct command_context *cmd_ctx = current_command_context(interp);
5421 assert(cmd_ctx);
5422 struct target *target = get_current_target(cmd_ctx);
5423 return target_array2mem(interp, target, argc - 1, argv + 1);
5426 static int jim_target_tap_disabled(Jim_Interp *interp)
5428 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5429 return JIM_ERR;
5432 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5434 bool allow_defer = false;
5436 struct jim_getopt_info goi;
5437 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5438 if (goi.argc > 1) {
5439 const char *cmd_name = Jim_GetString(argv[0], NULL);
5440 Jim_SetResultFormatted(goi.interp,
5441 "usage: %s ['allow-defer']", cmd_name);
5442 return JIM_ERR;
5444 if (goi.argc > 0 &&
5445 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5446 /* consume it */
5447 Jim_Obj *obj;
5448 int e = jim_getopt_obj(&goi, &obj);
5449 if (e != JIM_OK)
5450 return e;
5451 allow_defer = true;
5454 struct command_context *cmd_ctx = current_command_context(interp);
5455 assert(cmd_ctx);
5456 struct target *target = get_current_target(cmd_ctx);
5457 if (!target->tap->enabled)
5458 return jim_target_tap_disabled(interp);
5460 if (allow_defer && target->defer_examine) {
5461 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5462 LOG_INFO("Use arp_examine command to examine it manually!");
5463 return JIM_OK;
5466 int e = target->type->examine(target);
5467 if (e != ERROR_OK) {
5468 target_reset_examined(target);
5469 return JIM_ERR;
5472 target_set_examined(target);
5474 return JIM_OK;
5477 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5479 struct command_context *cmd_ctx = current_command_context(interp);
5480 assert(cmd_ctx);
5481 struct target *target = get_current_target(cmd_ctx);
5483 Jim_SetResultBool(interp, target_was_examined(target));
5484 return JIM_OK;
5487 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5489 struct command_context *cmd_ctx = current_command_context(interp);
5490 assert(cmd_ctx);
5491 struct target *target = get_current_target(cmd_ctx);
5493 Jim_SetResultBool(interp, target->defer_examine);
5494 return JIM_OK;
5497 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5499 if (argc != 1) {
5500 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5501 return JIM_ERR;
5503 struct command_context *cmd_ctx = current_command_context(interp);
5504 assert(cmd_ctx);
5505 struct target *target = get_current_target(cmd_ctx);
5507 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5508 return JIM_ERR;
5510 return JIM_OK;
5513 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5515 if (argc != 1) {
5516 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5517 return JIM_ERR;
5519 struct command_context *cmd_ctx = current_command_context(interp);
5520 assert(cmd_ctx);
5521 struct target *target = get_current_target(cmd_ctx);
5522 if (!target->tap->enabled)
5523 return jim_target_tap_disabled(interp);
5525 int e;
5526 if (!(target_was_examined(target)))
5527 e = ERROR_TARGET_NOT_EXAMINED;
5528 else
5529 e = target->type->poll(target);
5530 if (e != ERROR_OK)
5531 return JIM_ERR;
5532 return JIM_OK;
5535 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5537 struct jim_getopt_info goi;
5538 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5540 if (goi.argc != 2) {
5541 Jim_WrongNumArgs(interp, 0, argv,
5542 "([tT]|[fF]|assert|deassert) BOOL");
5543 return JIM_ERR;
5546 struct jim_nvp *n;
5547 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5548 if (e != JIM_OK) {
5549 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5550 return e;
5552 /* the halt or not param */
5553 jim_wide a;
5554 e = jim_getopt_wide(&goi, &a);
5555 if (e != JIM_OK)
5556 return e;
5558 struct command_context *cmd_ctx = current_command_context(interp);
5559 assert(cmd_ctx);
5560 struct target *target = get_current_target(cmd_ctx);
5561 if (!target->tap->enabled)
5562 return jim_target_tap_disabled(interp);
5564 if (!target->type->assert_reset || !target->type->deassert_reset) {
5565 Jim_SetResultFormatted(interp,
5566 "No target-specific reset for %s",
5567 target_name(target));
5568 return JIM_ERR;
5571 if (target->defer_examine)
5572 target_reset_examined(target);
5574 /* determine if we should halt or not. */
5575 target->reset_halt = (a != 0);
5576 /* When this happens - all workareas are invalid. */
5577 target_free_all_working_areas_restore(target, 0);
5579 /* do the assert */
5580 if (n->value == NVP_ASSERT)
5581 e = target->type->assert_reset(target);
5582 else
5583 e = target->type->deassert_reset(target);
5584 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5587 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5589 if (argc != 1) {
5590 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5591 return JIM_ERR;
5593 struct command_context *cmd_ctx = current_command_context(interp);
5594 assert(cmd_ctx);
5595 struct target *target = get_current_target(cmd_ctx);
5596 if (!target->tap->enabled)
5597 return jim_target_tap_disabled(interp);
5598 int e = target->type->halt(target);
5599 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5602 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5604 struct jim_getopt_info goi;
5605 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5607 /* params: <name> statename timeoutmsecs */
5608 if (goi.argc != 2) {
5609 const char *cmd_name = Jim_GetString(argv[0], NULL);
5610 Jim_SetResultFormatted(goi.interp,
5611 "%s <state_name> <timeout_in_msec>", cmd_name);
5612 return JIM_ERR;
5615 struct jim_nvp *n;
5616 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5617 if (e != JIM_OK) {
5618 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5619 return e;
5621 jim_wide a;
5622 e = jim_getopt_wide(&goi, &a);
5623 if (e != JIM_OK)
5624 return e;
5625 struct command_context *cmd_ctx = current_command_context(interp);
5626 assert(cmd_ctx);
5627 struct target *target = get_current_target(cmd_ctx);
5628 if (!target->tap->enabled)
5629 return jim_target_tap_disabled(interp);
5631 e = target_wait_state(target, n->value, a);
5632 if (e != ERROR_OK) {
5633 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5634 Jim_SetResultFormatted(goi.interp,
5635 "target: %s wait %s fails (%#s) %s",
5636 target_name(target), n->name,
5637 obj, target_strerror_safe(e));
5638 return JIM_ERR;
5640 return JIM_OK;
5642 /* List, for humans, the events defined for this target.
5643 * Scripts/programs should use 'name cget -event NAME'.
5645 COMMAND_HANDLER(handle_target_event_list)
5647 struct target *target = get_current_target(CMD_CTX);
5648 struct target_event_action *teap = target->event_action;
5650 command_print(CMD, "Event actions for target (%d) %s\n",
5651 target->target_number,
5652 target_name(target));
5653 command_print(CMD, "%-25s | Body", "Event");
5654 command_print(CMD, "------------------------- | "
5655 "----------------------------------------");
5656 while (teap) {
5657 command_print(CMD, "%-25s | %s",
5658 target_event_name(teap->event),
5659 Jim_GetString(teap->body, NULL));
5660 teap = teap->next;
5662 command_print(CMD, "***END***");
5663 return ERROR_OK;
5665 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5667 if (argc != 1) {
5668 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5669 return JIM_ERR;
5671 struct command_context *cmd_ctx = current_command_context(interp);
5672 assert(cmd_ctx);
5673 struct target *target = get_current_target(cmd_ctx);
5674 Jim_SetResultString(interp, target_state_name(target), -1);
5675 return JIM_OK;
5677 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5679 struct jim_getopt_info goi;
5680 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5681 if (goi.argc != 1) {
5682 const char *cmd_name = Jim_GetString(argv[0], NULL);
5683 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5684 return JIM_ERR;
5686 struct jim_nvp *n;
5687 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5688 if (e != JIM_OK) {
5689 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5690 return e;
5692 struct command_context *cmd_ctx = current_command_context(interp);
5693 assert(cmd_ctx);
5694 struct target *target = get_current_target(cmd_ctx);
5695 target_handle_event(target, n->value);
5696 return JIM_OK;
5699 static const struct command_registration target_instance_command_handlers[] = {
5701 .name = "configure",
5702 .mode = COMMAND_ANY,
5703 .jim_handler = jim_target_configure,
5704 .help = "configure a new target for use",
5705 .usage = "[target_attribute ...]",
5708 .name = "cget",
5709 .mode = COMMAND_ANY,
5710 .jim_handler = jim_target_configure,
5711 .help = "returns the specified target attribute",
5712 .usage = "target_attribute",
5715 .name = "mwd",
5716 .handler = handle_mw_command,
5717 .mode = COMMAND_EXEC,
5718 .help = "Write 64-bit word(s) to target memory",
5719 .usage = "address data [count]",
5722 .name = "mww",
5723 .handler = handle_mw_command,
5724 .mode = COMMAND_EXEC,
5725 .help = "Write 32-bit word(s) to target memory",
5726 .usage = "address data [count]",
5729 .name = "mwh",
5730 .handler = handle_mw_command,
5731 .mode = COMMAND_EXEC,
5732 .help = "Write 16-bit half-word(s) to target memory",
5733 .usage = "address data [count]",
5736 .name = "mwb",
5737 .handler = handle_mw_command,
5738 .mode = COMMAND_EXEC,
5739 .help = "Write byte(s) to target memory",
5740 .usage = "address data [count]",
5743 .name = "mdd",
5744 .handler = handle_md_command,
5745 .mode = COMMAND_EXEC,
5746 .help = "Display target memory as 64-bit words",
5747 .usage = "address [count]",
5750 .name = "mdw",
5751 .handler = handle_md_command,
5752 .mode = COMMAND_EXEC,
5753 .help = "Display target memory as 32-bit words",
5754 .usage = "address [count]",
5757 .name = "mdh",
5758 .handler = handle_md_command,
5759 .mode = COMMAND_EXEC,
5760 .help = "Display target memory as 16-bit half-words",
5761 .usage = "address [count]",
5764 .name = "mdb",
5765 .handler = handle_md_command,
5766 .mode = COMMAND_EXEC,
5767 .help = "Display target memory as 8-bit bytes",
5768 .usage = "address [count]",
5771 .name = "array2mem",
5772 .mode = COMMAND_EXEC,
5773 .jim_handler = jim_target_array2mem,
5774 .help = "Writes Tcl array of 8/16/32 bit numbers "
5775 "to target memory",
5776 .usage = "arrayname bitwidth address count",
5779 .name = "mem2array",
5780 .mode = COMMAND_EXEC,
5781 .jim_handler = jim_target_mem2array,
5782 .help = "Loads Tcl array of 8/16/32 bit numbers "
5783 "from target memory",
5784 .usage = "arrayname bitwidth address count",
5787 .name = "get_reg",
5788 .mode = COMMAND_EXEC,
5789 .jim_handler = target_jim_get_reg,
5790 .help = "Get register values from the target",
5791 .usage = "list",
5794 .name = "set_reg",
5795 .mode = COMMAND_EXEC,
5796 .jim_handler = target_jim_set_reg,
5797 .help = "Set target register values",
5798 .usage = "dict",
5801 .name = "eventlist",
5802 .handler = handle_target_event_list,
5803 .mode = COMMAND_EXEC,
5804 .help = "displays a table of events defined for this target",
5805 .usage = "",
5808 .name = "curstate",
5809 .mode = COMMAND_EXEC,
5810 .jim_handler = jim_target_current_state,
5811 .help = "displays the current state of this target",
5814 .name = "arp_examine",
5815 .mode = COMMAND_EXEC,
5816 .jim_handler = jim_target_examine,
5817 .help = "used internally for reset processing",
5818 .usage = "['allow-defer']",
5821 .name = "was_examined",
5822 .mode = COMMAND_EXEC,
5823 .jim_handler = jim_target_was_examined,
5824 .help = "used internally for reset processing",
5827 .name = "examine_deferred",
5828 .mode = COMMAND_EXEC,
5829 .jim_handler = jim_target_examine_deferred,
5830 .help = "used internally for reset processing",
5833 .name = "arp_halt_gdb",
5834 .mode = COMMAND_EXEC,
5835 .jim_handler = jim_target_halt_gdb,
5836 .help = "used internally for reset processing to halt GDB",
5839 .name = "arp_poll",
5840 .mode = COMMAND_EXEC,
5841 .jim_handler = jim_target_poll,
5842 .help = "used internally for reset processing",
5845 .name = "arp_reset",
5846 .mode = COMMAND_EXEC,
5847 .jim_handler = jim_target_reset,
5848 .help = "used internally for reset processing",
5851 .name = "arp_halt",
5852 .mode = COMMAND_EXEC,
5853 .jim_handler = jim_target_halt,
5854 .help = "used internally for reset processing",
5857 .name = "arp_waitstate",
5858 .mode = COMMAND_EXEC,
5859 .jim_handler = jim_target_wait_state,
5860 .help = "used internally for reset processing",
5863 .name = "invoke-event",
5864 .mode = COMMAND_EXEC,
5865 .jim_handler = jim_target_invoke_event,
5866 .help = "invoke handler for specified event",
5867 .usage = "event_name",
5869 COMMAND_REGISTRATION_DONE
5872 static int target_create(struct jim_getopt_info *goi)
5874 Jim_Obj *new_cmd;
5875 Jim_Cmd *cmd;
5876 const char *cp;
5877 int e;
5878 int x;
5879 struct target *target;
5880 struct command_context *cmd_ctx;
5882 cmd_ctx = current_command_context(goi->interp);
5883 assert(cmd_ctx);
5885 if (goi->argc < 3) {
5886 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5887 return JIM_ERR;
5890 /* COMMAND */
5891 jim_getopt_obj(goi, &new_cmd);
5892 /* does this command exist? */
5893 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
5894 if (cmd) {
5895 cp = Jim_GetString(new_cmd, NULL);
5896 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5897 return JIM_ERR;
5900 /* TYPE */
5901 e = jim_getopt_string(goi, &cp, NULL);
5902 if (e != JIM_OK)
5903 return e;
5904 struct transport *tr = get_current_transport();
5905 if (tr->override_target) {
5906 e = tr->override_target(&cp);
5907 if (e != ERROR_OK) {
5908 LOG_ERROR("The selected transport doesn't support this target");
5909 return JIM_ERR;
5911 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5913 /* now does target type exist */
5914 for (x = 0 ; target_types[x] ; x++) {
5915 if (strcmp(cp, target_types[x]->name) == 0) {
5916 /* found */
5917 break;
5920 if (!target_types[x]) {
5921 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5922 for (x = 0 ; target_types[x] ; x++) {
5923 if (target_types[x + 1]) {
5924 Jim_AppendStrings(goi->interp,
5925 Jim_GetResult(goi->interp),
5926 target_types[x]->name,
5927 ", ", NULL);
5928 } else {
5929 Jim_AppendStrings(goi->interp,
5930 Jim_GetResult(goi->interp),
5931 " or ",
5932 target_types[x]->name, NULL);
5935 return JIM_ERR;
5938 /* Create it */
5939 target = calloc(1, sizeof(struct target));
5940 if (!target) {
5941 LOG_ERROR("Out of memory");
5942 return JIM_ERR;
5945 /* set empty smp cluster */
5946 target->smp_targets = &empty_smp_targets;
5948 /* set target number */
5949 target->target_number = new_target_number();
5951 /* allocate memory for each unique target type */
5952 target->type = malloc(sizeof(struct target_type));
5953 if (!target->type) {
5954 LOG_ERROR("Out of memory");
5955 free(target);
5956 return JIM_ERR;
5959 memcpy(target->type, target_types[x], sizeof(struct target_type));
5961 /* default to first core, override with -coreid */
5962 target->coreid = 0;
5964 target->working_area = 0x0;
5965 target->working_area_size = 0x0;
5966 target->working_areas = NULL;
5967 target->backup_working_area = 0;
5969 target->state = TARGET_UNKNOWN;
5970 target->debug_reason = DBG_REASON_UNDEFINED;
5971 target->reg_cache = NULL;
5972 target->breakpoints = NULL;
5973 target->watchpoints = NULL;
5974 target->next = NULL;
5975 target->arch_info = NULL;
5977 target->verbose_halt_msg = true;
5979 target->halt_issued = false;
5981 /* initialize trace information */
5982 target->trace_info = calloc(1, sizeof(struct trace));
5983 if (!target->trace_info) {
5984 LOG_ERROR("Out of memory");
5985 free(target->type);
5986 free(target);
5987 return JIM_ERR;
5990 target->dbgmsg = NULL;
5991 target->dbg_msg_enabled = 0;
5993 target->endianness = TARGET_ENDIAN_UNKNOWN;
5995 target->rtos = NULL;
5996 target->rtos_auto_detect = false;
5998 target->gdb_port_override = NULL;
5999 target->gdb_max_connections = 1;
6001 /* Do the rest as "configure" options */
6002 goi->isconfigure = 1;
6003 e = target_configure(goi, target);
6005 if (e == JIM_OK) {
6006 if (target->has_dap) {
6007 if (!target->dap_configured) {
6008 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6009 e = JIM_ERR;
6011 } else {
6012 if (!target->tap_configured) {
6013 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6014 e = JIM_ERR;
6017 /* tap must be set after target was configured */
6018 if (!target->tap)
6019 e = JIM_ERR;
6022 if (e != JIM_OK) {
6023 rtos_destroy(target);
6024 free(target->gdb_port_override);
6025 free(target->trace_info);
6026 free(target->type);
6027 free(target);
6028 return e;
6031 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6032 /* default endian to little if not specified */
6033 target->endianness = TARGET_LITTLE_ENDIAN;
6036 cp = Jim_GetString(new_cmd, NULL);
6037 target->cmd_name = strdup(cp);
6038 if (!target->cmd_name) {
6039 LOG_ERROR("Out of memory");
6040 rtos_destroy(target);
6041 free(target->gdb_port_override);
6042 free(target->trace_info);
6043 free(target->type);
6044 free(target);
6045 return JIM_ERR;
6048 if (target->type->target_create) {
6049 e = (*(target->type->target_create))(target, goi->interp);
6050 if (e != ERROR_OK) {
6051 LOG_DEBUG("target_create failed");
6052 free(target->cmd_name);
6053 rtos_destroy(target);
6054 free(target->gdb_port_override);
6055 free(target->trace_info);
6056 free(target->type);
6057 free(target);
6058 return JIM_ERR;
6059 }
6060 }
6062 /* create the target specific commands */
6063 if (target->type->commands) {
6064 e = register_commands(cmd_ctx, NULL, target->type->commands);
6065 if (e != ERROR_OK)
6066 LOG_ERROR("unable to register '%s' commands", cp);
6067 }
6069 /* now create the new target name command */
6070 const struct command_registration target_subcommands[] = {
6071 {
6072 .chain = target_instance_command_handlers,
6073 },
6074 {
6075 .chain = target->type->commands,
6076 },
6077 COMMAND_REGISTRATION_DONE
6078 };
6079 const struct command_registration target_commands[] = {
6080 {
6081 .name = cp,
6082 .mode = COMMAND_ANY,
6083 .help = "target command group",
6084 .usage = "",
6085 .chain = target_subcommands,
6086 },
6087 COMMAND_REGISTRATION_DONE
6088 };
6089 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6090 if (e != ERROR_OK) {
6091 if (target->type->deinit_target)
6092 target->type->deinit_target(target);
6093 free(target->cmd_name);
6094 rtos_destroy(target);
6095 free(target->gdb_port_override);
6096 free(target->trace_info);
6097 free(target->type);
6098 free(target);
6099 return JIM_ERR;
6100 }
6102 /* append to end of list */
6103 append_to_list_all_targets(target);
6105 cmd_ctx->current_target = target;
6106 return JIM_OK;
6107 }
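/* Illustrative only (the names below are hypothetical): a configuration script
 * reaches this code through something like
 *   target create mychip.cpu cortex_m -dap mychip.dap -coreid 0
 * which allocates the target, applies the -options via target_configure(), and
 * registers a "mychip.cpu" command whose sub-commands are chained above.
 */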
6109 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6110 {
6111 if (argc != 1) {
6112 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6113 return JIM_ERR;
6114 }
6115 struct command_context *cmd_ctx = current_command_context(interp);
6116 assert(cmd_ctx);
6118 struct target *target = get_current_target_or_null(cmd_ctx);
6119 if (target)
6120 Jim_SetResultString(interp, target_name(target), -1);
6121 return JIM_OK;
6122 }
6124 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6125 {
6126 if (argc != 1) {
6127 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6128 return JIM_ERR;
6129 }
6130 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6131 for (unsigned x = 0; target_types[x]; x++) {
6132 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6133 Jim_NewStringObj(interp, target_types[x]->name, -1));
6134 }
6135 return JIM_OK;
6136 }
6138 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6139 {
6140 if (argc != 1) {
6141 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6142 return JIM_ERR;
6143 }
6144 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6145 struct target *target = all_targets;
6146 while (target) {
6147 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6148 Jim_NewStringObj(interp, target_name(target), -1));
6149 target = target->next;
6150 }
6151 return JIM_OK;
6152 }
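/* Illustrative Tcl usage of the two introspection commands above:
 *   target types          ;# list every selectable target type
 *   foreach t [target names] { echo "configured target: $t" }
 */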
6154 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6155 {
6156 int i;
6157 const char *targetname;
6158 int retval, len;
6159 struct target *target = NULL;
6160 struct target_list *head, *new;
6162 retval = 0;
6163 LOG_DEBUG("%d", argc);
6164 /* argv[1] = target to associate in smp
6165 * argv[2] = target to associate in smp
6166 * argv[3] ...
6167 */
6169 struct list_head *lh = malloc(sizeof(*lh));
6170 if (!lh) {
6171 LOG_ERROR("Out of memory");
6172 return JIM_ERR;
6173 }
6174 INIT_LIST_HEAD(lh);
6176 for (i = 1; i < argc; i++) {
6178 targetname = Jim_GetString(argv[i], &len);
6179 target = get_target(targetname);
6180 LOG_DEBUG("%s ", targetname);
6181 if (target) {
6182 new = malloc(sizeof(struct target_list));
6183 new->target = target;
6184 list_add_tail(&new->lh, lh);
6185 }
6186 }
6187 /* now walk the list of CPUs and put each target in SMP mode */
6188 foreach_smp_target(head, lh) {
6189 target = head->target;
6190 target->smp = 1;
6191 target->smp_targets = lh;
6192 }
6194 if (target && target->rtos)
6195 retval = rtos_smp_init(head->target);
6197 return retval;
6198 }
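/* Illustrative usage (hypothetical target names):
 *   target smp core0.cpu core1.cpu
 * marks both targets as one SMP cluster; they then share the same smp_targets list.
 */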
6201 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6202 {
6203 struct jim_getopt_info goi;
6204 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6205 if (goi.argc < 3) {
6206 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6207 "<name> <target_type> [<target_options> ...]");
6208 return JIM_ERR;
6209 }
6210 return target_create(&goi);
6211 }
6213 static const struct command_registration target_subcommand_handlers[] = {
6214 {
6215 .name = "init",
6216 .mode = COMMAND_CONFIG,
6217 .handler = handle_target_init_command,
6218 .help = "initialize targets",
6219 .usage = "",
6220 },
6221 {
6222 .name = "create",
6223 .mode = COMMAND_CONFIG,
6224 .jim_handler = jim_target_create,
6225 .usage = "name type '-chain-position' name [options ...]",
6226 .help = "Creates and selects a new target",
6227 },
6228 {
6229 .name = "current",
6230 .mode = COMMAND_ANY,
6231 .jim_handler = jim_target_current,
6232 .help = "Returns the currently selected target",
6233 },
6234 {
6235 .name = "types",
6236 .mode = COMMAND_ANY,
6237 .jim_handler = jim_target_types,
6238 .help = "Returns the available target types as "
6239 "a list of strings",
6240 },
6241 {
6242 .name = "names",
6243 .mode = COMMAND_ANY,
6244 .jim_handler = jim_target_names,
6245 .help = "Returns the names of all targets as a list of strings",
6246 },
6247 {
6248 .name = "smp",
6249 .mode = COMMAND_ANY,
6250 .jim_handler = jim_target_smp,
6251 .usage = "targetname1 targetname2 ...",
6252 .help = "gather several targets into an SMP list"
6253 },
6255 COMMAND_REGISTRATION_DONE
6256 };
6258 struct fast_load {
6259 target_addr_t address;
6260 uint8_t *data;
6261 int length;
6262 };
6265 static int fastload_num;
6266 static struct fast_load *fastload;
6268 static void free_fastload(void)
6269 {
6270 if (fastload) {
6271 for (int i = 0; i < fastload_num; i++)
6272 free(fastload[i].data);
6273 free(fastload);
6274 fastload = NULL;
6275 }
6276 }
6278 COMMAND_HANDLER(handle_fast_load_image_command)
6279 {
6280 uint8_t *buffer;
6281 size_t buf_cnt;
6282 uint32_t image_size;
6283 target_addr_t min_address = 0;
6284 target_addr_t max_address = -1;
6286 struct image image;
6288 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6289 &image, &min_address, &max_address);
6290 if (retval != ERROR_OK)
6291 return retval;
6293 struct duration bench;
6294 duration_start(&bench);
6296 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6297 if (retval != ERROR_OK)
6298 return retval;
6300 image_size = 0x0;
6301 retval = ERROR_OK;
6302 fastload_num = image.num_sections;
6303 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6304 if (!fastload) {
6305 command_print(CMD, "out of memory");
6306 image_close(&image);
6307 return ERROR_FAIL;
6308 }
6309 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6310 for (unsigned int i = 0; i < image.num_sections; i++) {
6311 buffer = malloc(image.sections[i].size);
6312 if (!buffer) {
6313 command_print(CMD, "error allocating buffer for section (%d bytes)",
6314 (int)(image.sections[i].size));
6315 retval = ERROR_FAIL;
6316 break;
6317 }
6319 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6320 if (retval != ERROR_OK) {
6321 free(buffer);
6322 break;
6323 }
6325 uint32_t offset = 0;
6326 uint32_t length = buf_cnt;
6328 /* DANGER!!! beware of unsigned comparison here!!! */
6330 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6331 (image.sections[i].base_address < max_address)) {
6332 if (image.sections[i].base_address < min_address) {
6333 /* clip addresses below */
6334 offset += min_address-image.sections[i].base_address;
6335 length -= offset;
6336 }
6338 if (image.sections[i].base_address + buf_cnt > max_address)
6339 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6341 fastload[i].address = image.sections[i].base_address + offset;
6342 fastload[i].data = malloc(length);
6343 if (!fastload[i].data) {
6344 free(buffer);
6345 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6346 length);
6347 retval = ERROR_FAIL;
6348 break;
6349 }
6350 memcpy(fastload[i].data, buffer + offset, length);
6351 fastload[i].length = length;
6353 image_size += length;
6354 command_print(CMD, "%u bytes written at address 0x%8.8x",
6355 (unsigned int)length,
6356 ((unsigned int)(image.sections[i].base_address + offset)));
6357 }
6359 free(buffer);
6360 }
6362 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6363 command_print(CMD, "Loaded %" PRIu32 " bytes "
6364 "in %fs (%0.3f KiB/s)", image_size,
6365 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6367 command_print(CMD,
6368 "WARNING: image has not been loaded to target! "
6369 "You can issue a 'fast_load' to finish loading.");
6370 }
6372 image_close(&image);
6374 if (retval != ERROR_OK)
6375 free_fastload();
6377 return retval;
6378 }
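/* Illustrative two-step workflow (the file name is hypothetical):
 *   fast_load_image firmware.elf 0x0 elf   ;# stage the image in server memory
 *   fast_load                              ;# later, write the staged data to the target
 * Staging keeps host-side file parsing out of the timed download, which is why
 * these commands are mainly used for profiling transfer speed.
 */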
6380 COMMAND_HANDLER(handle_fast_load_command)
6381 {
6382 if (CMD_ARGC > 0)
6383 return ERROR_COMMAND_SYNTAX_ERROR;
6384 if (!fastload) {
6385 LOG_ERROR("No image in memory");
6386 return ERROR_FAIL;
6387 }
6388 int i;
6389 int64_t ms = timeval_ms();
6390 int size = 0;
6391 int retval = ERROR_OK;
6392 for (i = 0; i < fastload_num; i++) {
6393 struct target *target = get_current_target(CMD_CTX);
6394 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6395 (unsigned int)(fastload[i].address),
6396 (unsigned int)(fastload[i].length));
6397 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6398 if (retval != ERROR_OK)
6399 break;
6400 size += fastload[i].length;
6401 }
6402 if (retval == ERROR_OK) {
6403 int64_t after = timeval_ms();
6404 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6405 }
6406 return retval;
6407 }
6409 static const struct command_registration target_command_handlers[] = {
6410 {
6411 .name = "targets",
6412 .handler = handle_targets_command,
6413 .mode = COMMAND_ANY,
6414 .help = "change the current default target (one parameter) "
6415 "or print a table of all targets (no parameters)",
6416 .usage = "[target]",
6417 },
6418 {
6419 .name = "target",
6420 .mode = COMMAND_CONFIG,
6421 .help = "configure target",
6422 .chain = target_subcommand_handlers,
6423 .usage = "",
6424 },
6425 COMMAND_REGISTRATION_DONE
6426 };
6428 int target_register_commands(struct command_context *cmd_ctx)
6429 {
6430 return register_commands(cmd_ctx, NULL, target_command_handlers);
6431 }
6433 static bool target_reset_nag = true;
6435 bool get_target_reset_nag(void)
6436 {
6437 return target_reset_nag;
6438 }
6440 COMMAND_HANDLER(handle_target_reset_nag)
6441 {
6442 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6443 &target_reset_nag, "Nag after each reset about options to improve "
6444 "performance");
6445 }
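/* Illustrative usage: "reset_nag disable" silences the post-reset performance
 * hints; calling "reset_nag" with no argument reports the current setting.
 */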
6447 COMMAND_HANDLER(handle_ps_command)
6448 {
6449 struct target *target = get_current_target(CMD_CTX);
6450 char *display;
6451 if (target->state != TARGET_HALTED) {
6452 LOG_INFO("target not halted");
6453 return ERROR_OK;
6454 }
6456 if ((target->rtos) && (target->rtos->type)
6457 && (target->rtos->type->ps_command)) {
6458 display = target->rtos->type->ps_command(target);
6459 command_print(CMD, "%s", display);
6460 free(display);
6461 return ERROR_OK;
6462 } else {
6463 LOG_INFO("failed");
6464 return ERROR_TARGET_FAILURE;
6465 }
6466 }
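/* Note: "ps" simply forwards to the RTOS plugin's ps_command() hook, so the
 * output format depends entirely on the RTOS support configured for the target.
 */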
6468 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6469 {
6470 if (text)
6471 command_print_sameline(cmd, "%s", text);
6472 for (int i = 0; i < size; i++)
6473 command_print_sameline(cmd, " %02x", buf[i]);
6474 command_print(cmd, " ");
6475 }
6477 COMMAND_HANDLER(handle_test_mem_access_command)
6478 {
6479 struct target *target = get_current_target(CMD_CTX);
6480 uint32_t test_size;
6481 int retval = ERROR_OK;
6483 if (target->state != TARGET_HALTED) {
6484 LOG_INFO("target not halted");
6485 return ERROR_FAIL;
6486 }
6488 if (CMD_ARGC != 1)
6489 return ERROR_COMMAND_SYNTAX_ERROR;
6491 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6493 /* Test reads */
6494 size_t num_bytes = test_size + 4;
6496 struct working_area *wa = NULL;
6497 retval = target_alloc_working_area(target, num_bytes, &wa);
6498 if (retval != ERROR_OK) {
6499 LOG_ERROR("Not enough working area");
6500 return ERROR_FAIL;
6501 }
6503 uint8_t *test_pattern = malloc(num_bytes);
6505 for (size_t i = 0; i < num_bytes; i++)
6506 test_pattern[i] = rand();
6508 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6509 if (retval != ERROR_OK) {
6510 LOG_ERROR("Test pattern write failed");
6511 goto out;
6512 }
6514 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6515 for (int size = 1; size <= 4; size *= 2) {
6516 for (int offset = 0; offset < 4; offset++) {
6517 uint32_t count = test_size / size;
6518 size_t host_bufsiz = (count + 2) * size + host_offset;
6519 uint8_t *read_ref = malloc(host_bufsiz);
6520 uint8_t *read_buf = malloc(host_bufsiz);
6522 for (size_t i = 0; i < host_bufsiz; i++) {
6523 read_ref[i] = rand();
6524 read_buf[i] = read_ref[i];
6525 }
6526 command_print_sameline(CMD,
6527 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6528 size, offset, host_offset ? "un" : "");
6530 struct duration bench;
6531 duration_start(&bench);
6533 retval = target_read_memory(target, wa->address + offset, size, count,
6534 read_buf + size + host_offset);
6536 duration_measure(&bench);
6538 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6539 command_print(CMD, "Unsupported alignment");
6540 goto next;
6541 } else if (retval != ERROR_OK) {
6542 command_print(CMD, "Memory read failed");
6543 goto next;
6544 }
6546 /* replay on host */
6547 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6549 /* check result */
6550 int result = memcmp(read_ref, read_buf, host_bufsiz);
6551 if (result == 0) {
6552 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6553 duration_elapsed(&bench),
6554 duration_kbps(&bench, count * size));
6555 } else {
6556 command_print(CMD, "Compare failed");
6557 binprint(CMD, "ref:", read_ref, host_bufsiz);
6558 binprint(CMD, "buf:", read_buf, host_bufsiz);
6559 }
6560 next:
6561 free(read_ref);
6562 free(read_buf);
6563 }
6564 }
6565 }
6567 out:
6568 free(test_pattern);
6570 target_free_working_area(target, wa);
6572 /* Test writes */
6573 num_bytes = test_size + 4 + 4 + 4;
6575 retval = target_alloc_working_area(target, num_bytes, &wa);
6576 if (retval != ERROR_OK) {
6577 LOG_ERROR("Not enough working area");
6578 return ERROR_FAIL;
6579 }
6581 test_pattern = malloc(num_bytes);
6583 for (size_t i = 0; i < num_bytes; i++)
6584 test_pattern[i] = rand();
6586 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6587 for (int size = 1; size <= 4; size *= 2) {
6588 for (int offset = 0; offset < 4; offset++) {
6589 uint32_t count = test_size / size;
6590 size_t host_bufsiz = count * size + host_offset;
6591 uint8_t *read_ref = malloc(num_bytes);
6592 uint8_t *read_buf = malloc(num_bytes);
6593 uint8_t *write_buf = malloc(host_bufsiz);
6595 for (size_t i = 0; i < host_bufsiz; i++)
6596 write_buf[i] = rand();
6597 command_print_sameline(CMD,
6598 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6599 size, offset, host_offset ? "un" : "");
6601 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6602 if (retval != ERROR_OK) {
6603 command_print(CMD, "Test pattern write failed");
6604 goto nextw;
6605 }
6607 /* replay on host */
6608 memcpy(read_ref, test_pattern, num_bytes);
6609 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6611 struct duration bench;
6612 duration_start(&bench);
6614 retval = target_write_memory(target, wa->address + size + offset, size, count,
6615 write_buf + host_offset);
6617 duration_measure(&bench);
6619 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6620 command_print(CMD, "Unsupported alignment");
6621 goto nextw;
6622 } else if (retval != ERROR_OK) {
6623 command_print(CMD, "Memory write failed");
6624 goto nextw;
6625 }
6627 /* read back */
6628 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6629 if (retval != ERROR_OK) {
6630 command_print(CMD, "Read back of test pattern failed");
6631 goto nextw;
6632 }
6634 /* check result */
6635 int result = memcmp(read_ref, read_buf, num_bytes);
6636 if (result == 0) {
6637 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6638 duration_elapsed(&bench),
6639 duration_kbps(&bench, count * size));
6640 } else {
6641 command_print(CMD, "Compare failed");
6642 binprint(CMD, "ref:", read_ref, num_bytes);
6643 binprint(CMD, "buf:", read_buf, num_bytes);
6644 }
6645 nextw:
6646 free(read_ref);
6647 free(read_buf);
6648 free(write_buf); /* write_buf is reallocated on every pass; free it to avoid a leak */
6649 }
6650 }
6651 }
6652 free(test_pattern);
6654 target_free_working_area(target, wa);
6655 return retval;
6656 }
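/* Illustrative usage: "test_mem_access 1024" exercises reads and writes of that
 * many bytes for every access width (1/2/4 bytes), target offset (0..3) and host
 * buffer alignment, comparing the result against a host-side replay of the pattern.
 */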
6658 static const struct command_registration target_exec_command_handlers[] = {
6659 {
6660 .name = "fast_load_image",
6661 .handler = handle_fast_load_image_command,
6662 .mode = COMMAND_ANY,
6663 .help = "Load image into server memory for later use by "
6664 "fast_load; primarily for profiling",
6665 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6666 "[min_address [max_length]]",
6667 },
6668 {
6669 .name = "fast_load",
6670 .handler = handle_fast_load_command,
6671 .mode = COMMAND_EXEC,
6672 .help = "load the active fast_load image onto the current target; "
6673 "mainly for profiling purposes",
6674 .usage = "",
6675 },
6676 {
6677 .name = "profile",
6678 .handler = handle_profile_command,
6679 .mode = COMMAND_EXEC,
6680 .usage = "seconds filename [start end]",
6681 .help = "profiling samples the CPU PC",
6682 },
6683 /** @todo don't register virt2phys() unless target supports it */
6684 {
6685 .name = "virt2phys",
6686 .handler = handle_virt2phys_command,
6687 .mode = COMMAND_ANY,
6688 .help = "translate a virtual address into a physical address",
6689 .usage = "virtual_address",
6690 },
6691 {
6692 .name = "reg",
6693 .handler = handle_reg_command,
6694 .mode = COMMAND_EXEC,
6695 .help = "display (reread from target with \"force\") or set a register; "
6696 "with no arguments, displays all registers and their values",
6697 .usage = "[(register_number|register_name) [(value|'force')]]",
6698 },
6699 {
6700 .name = "poll",
6701 .handler = handle_poll_command,
6702 .mode = COMMAND_EXEC,
6703 .help = "poll target state; or reconfigure background polling",
6704 .usage = "['on'|'off']",
6705 },
6706 {
6707 .name = "wait_halt",
6708 .handler = handle_wait_halt_command,
6709 .mode = COMMAND_EXEC,
6710 .help = "wait up to the specified number of milliseconds "
6711 "(default 5000) for a previously requested halt",
6712 .usage = "[milliseconds]",
6713 },
6714 {
6715 .name = "halt",
6716 .handler = handle_halt_command,
6717 .mode = COMMAND_EXEC,
6718 .help = "request target to halt, then wait up to the specified "
6719 "number of milliseconds (default 5000) for it to complete",
6720 .usage = "[milliseconds]",
6721 },
6722 {
6723 .name = "resume",
6724 .handler = handle_resume_command,
6725 .mode = COMMAND_EXEC,
6726 .help = "resume target execution from current PC or address",
6727 .usage = "[address]",
6728 },
6729 {
6730 .name = "reset",
6731 .handler = handle_reset_command,
6732 .mode = COMMAND_EXEC,
6733 .usage = "[run|halt|init]",
6734 .help = "Reset all targets into the specified mode. "
6735 "Defaults to 'run' if no mode is given.",
6736 },
6737 {
6738 .name = "soft_reset_halt",
6739 .handler = handle_soft_reset_halt_command,
6740 .mode = COMMAND_EXEC,
6741 .usage = "",
6742 .help = "halt the target and do a soft reset",
6743 },
6744 {
6745 .name = "step",
6746 .handler = handle_step_command,
6747 .mode = COMMAND_EXEC,
6748 .help = "step one instruction from current PC or address",
6749 .usage = "[address]",
6750 },
6751 {
6752 .name = "mdd",
6753 .handler = handle_md_command,
6754 .mode = COMMAND_EXEC,
6755 .help = "display memory double-words",
6756 .usage = "['phys'] address [count]",
6757 },
6758 {
6759 .name = "mdw",
6760 .handler = handle_md_command,
6761 .mode = COMMAND_EXEC,
6762 .help = "display memory words",
6763 .usage = "['phys'] address [count]",
6764 },
6765 {
6766 .name = "mdh",
6767 .handler = handle_md_command,
6768 .mode = COMMAND_EXEC,
6769 .help = "display memory half-words",
6770 .usage = "['phys'] address [count]",
6771 },
6772 {
6773 .name = "mdb",
6774 .handler = handle_md_command,
6775 .mode = COMMAND_EXEC,
6776 .help = "display memory bytes",
6777 .usage = "['phys'] address [count]",
6778 },
6779 {
6780 .name = "mwd",
6781 .handler = handle_mw_command,
6782 .mode = COMMAND_EXEC,
6783 .help = "write memory double-word",
6784 .usage = "['phys'] address value [count]",
6785 },
6786 {
6787 .name = "mww",
6788 .handler = handle_mw_command,
6789 .mode = COMMAND_EXEC,
6790 .help = "write memory word",
6791 .usage = "['phys'] address value [count]",
6792 },
6793 {
6794 .name = "mwh",
6795 .handler = handle_mw_command,
6796 .mode = COMMAND_EXEC,
6797 .help = "write memory half-word",
6798 .usage = "['phys'] address value [count]",
6799 },
6800 {
6801 .name = "mwb",
6802 .handler = handle_mw_command,
6803 .mode = COMMAND_EXEC,
6804 .help = "write memory byte",
6805 .usage = "['phys'] address value [count]",
6806 },
6807 {
6808 .name = "bp",
6809 .handler = handle_bp_command,
6810 .mode = COMMAND_EXEC,
6811 .help = "list or set hardware or software breakpoint",
6812 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6813 },
6814 {
6815 .name = "rbp",
6816 .handler = handle_rbp_command,
6817 .mode = COMMAND_EXEC,
6818 .help = "remove breakpoint",
6819 .usage = "'all' | address",
6820 },
6821 {
6822 .name = "wp",
6823 .handler = handle_wp_command,
6824 .mode = COMMAND_EXEC,
6825 .help = "list (no params) or create watchpoints",
6826 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6827 },
6828 {
6829 .name = "rwp",
6830 .handler = handle_rwp_command,
6831 .mode = COMMAND_EXEC,
6832 .help = "remove watchpoint",
6833 .usage = "address",
6834 },
6835 {
6836 .name = "load_image",
6837 .handler = handle_load_image_command,
6838 .mode = COMMAND_EXEC,
6839 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6840 "[min_address] [max_length]",
6841 },
6842 {
6843 .name = "dump_image",
6844 .handler = handle_dump_image_command,
6845 .mode = COMMAND_EXEC,
6846 .usage = "filename address size",
6847 },
6848 {
6849 .name = "verify_image_checksum",
6850 .handler = handle_verify_image_checksum_command,
6851 .mode = COMMAND_EXEC,
6852 .usage = "filename [offset [type]]",
6853 },
6854 {
6855 .name = "verify_image",
6856 .handler = handle_verify_image_command,
6857 .mode = COMMAND_EXEC,
6858 .usage = "filename [offset [type]]",
6859 },
6860 {
6861 .name = "test_image",
6862 .handler = handle_test_image_command,
6863 .mode = COMMAND_EXEC,
6864 .usage = "filename [offset [type]]",
6865 },
6866 {
6867 .name = "mem2array",
6868 .mode = COMMAND_EXEC,
6869 .jim_handler = jim_mem2array,
6870 .help = "read 8/16/32 bit memory and return as a TCL array "
6871 "for script processing",
6872 .usage = "arrayname bitwidth address count",
6873 },
6874 {
6875 .name = "array2mem",
6876 .mode = COMMAND_EXEC,
6877 .jim_handler = jim_array2mem,
6878 .help = "convert a TCL array to memory locations "
6879 "and write the 8/16/32 bit values",
6880 .usage = "arrayname bitwidth address count",
6881 },
6882 {
6883 .name = "get_reg",
6884 .mode = COMMAND_EXEC,
6885 .jim_handler = target_jim_get_reg,
6886 .help = "Get register values from the target",
6887 .usage = "list",
6888 },
6889 {
6890 .name = "set_reg",
6891 .mode = COMMAND_EXEC,
6892 .jim_handler = target_jim_set_reg,
6893 .help = "Set target register values",
6894 .usage = "dict",
6895 },
6896 {
6897 .name = "reset_nag",
6898 .handler = handle_target_reset_nag,
6899 .mode = COMMAND_ANY,
6900 .help = "Nag after each reset about options that could have been "
6901 "enabled to improve performance.",
6902 .usage = "['enable'|'disable']",
6903 },
6904 {
6905 .name = "ps",
6906 .handler = handle_ps_command,
6907 .mode = COMMAND_EXEC,
6908 .help = "list all tasks",
6909 .usage = "",
6910 },
6911 {
6912 .name = "test_mem_access",
6913 .handler = handle_test_mem_access_command,
6914 .mode = COMMAND_EXEC,
6915 .help = "Test the target's memory access functions",
6916 .usage = "size",
6917 },
6919 COMMAND_REGISTRATION_DONE
6920 };
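/* Illustrative Tcl usage of the register accessors registered above (register
 * names depend on the target):
 *   set vals [get_reg {pc sp}]            ;# e.g. returns: pc 0x08000150 sp 0x20001000
 *   set_reg {pc 0x08000150 sp 0x20002000}
 */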
6921 static int target_register_user_commands(struct command_context *cmd_ctx)
6922 {
6923 int retval = ERROR_OK;
6924 retval = target_request_register_commands(cmd_ctx);
6925 if (retval != ERROR_OK)
6926 return retval;
6928 retval = trace_register_commands(cmd_ctx);
6929 if (retval != ERROR_OK)
6930 return retval;
6933 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6934 }