src/target/xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 ***************************************************************************/
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
29 #include "breakpoints.h"
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm_jtag.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include <helper/time_support.h>
36 #include "register.h"
37 #include "image.h"
38 #include "arm_opcodes.h"
39 #include "armv4_5.h"
42 * Important XScale documents available as of October 2009 include:
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
57 * Chip-specific microarchitecture documents may also be useful.
60 /* forward declarations */
61 static int xscale_resume(struct target *, int current,
62 target_addr_t address, int handle_breakpoints, int debug_execution);
63 static int xscale_debug_entry(struct target *);
64 static int xscale_restore_banked(struct target *);
65 static int xscale_get_reg(struct reg *reg);
66 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
67 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
68 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
69 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_read_trace(struct target *);
72 /* This XScale "debug handler" is loaded into the processor's
73 * mini-ICache, which is 2K of code writable only via JTAG.
75 static const uint8_t xscale_debug_handler[] = {
76 #include "../../contrib/loaders/debug/xscale/debug_handler.inc"
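/* Illustrative sketch (not part of the driver): the handler image must fit in
 * the 2K mini-ICache noted above, and is later pushed 32 bytes at a time by
 * xscale_load_ic() from xscale_deassert_reset(). A build-time check along
 * these lines (C11 _Static_assert, shown only as an idea) would catch an
 * oversized handler early.
 */
#if 0
_Static_assert(sizeof(xscale_debug_handler) <= 2048,
	"XScale debug handler must fit in the 2K mini-ICache");
#endif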
79 static const char *const xscale_reg_list[] = {
80 "XSCALE_MAINID", /* 0 */
81 "XSCALE_CACHETYPE",
82 "XSCALE_CTRL",
83 "XSCALE_AUXCTRL",
84 "XSCALE_TTB",
85 "XSCALE_DAC",
86 "XSCALE_FSR",
87 "XSCALE_FAR",
88 "XSCALE_PID",
89 "XSCALE_CPACCESS",
90 "XSCALE_IBCR0", /* 10 */
91 "XSCALE_IBCR1",
92 "XSCALE_DBR0",
93 "XSCALE_DBR1",
94 "XSCALE_DBCON",
95 "XSCALE_TBREG",
96 "XSCALE_CHKPT0",
97 "XSCALE_CHKPT1",
98 "XSCALE_DCSR",
99 "XSCALE_TX",
100 "XSCALE_RX", /* 20 */
101 "XSCALE_TXRXCTRL",
104 static const struct xscale_reg xscale_reg_arch_info[] = {
105 {XSCALE_MAINID, NULL},
106 {XSCALE_CACHETYPE, NULL},
107 {XSCALE_CTRL, NULL},
108 {XSCALE_AUXCTRL, NULL},
109 {XSCALE_TTB, NULL},
110 {XSCALE_DAC, NULL},
111 {XSCALE_FSR, NULL},
112 {XSCALE_FAR, NULL},
113 {XSCALE_PID, NULL},
114 {XSCALE_CPACCESS, NULL},
115 {XSCALE_IBCR0, NULL},
116 {XSCALE_IBCR1, NULL},
117 {XSCALE_DBR0, NULL},
118 {XSCALE_DBR1, NULL},
119 {XSCALE_DBCON, NULL},
120 {XSCALE_TBREG, NULL},
121 {XSCALE_CHKPT0, NULL},
122 {XSCALE_CHKPT1, NULL},
123 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
124 {-1, NULL}, /* TX accessed via JTAG */
125 {-1, NULL}, /* RX accessed via JTAG */
126 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
129 /* convenience wrapper to access XScale specific registers */
130 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
132 uint8_t buf[4] = { 0 };
134 buf_set_u32(buf, 0, 32, value);
136 return xscale_set_reg(reg, buf);
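/* Typical use, as seen later in this file: hardware breakpoints are armed by
 * writing the breakpoint address with its enable bit into IBCR0/IBCR1, e.g.
 *
 *   xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], next_pc | 0x1);
 */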
139 static const char xscale_not[] = "target is not an XScale";
141 static int xscale_verify_pointer(struct command_invocation *cmd,
142 struct xscale_common *xscale)
144 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
145 command_print(cmd, xscale_not);
146 return ERROR_TARGET_INVALID;
148 return ERROR_OK;
151 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
153 assert(tap);
155 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
156 struct scan_field field;
157 uint8_t scratch[4] = { 0 };
159 memset(&field, 0, sizeof(field));
160 field.num_bits = tap->ir_length;
161 field.out_value = scratch;
162 buf_set_u32(scratch, 0, field.num_bits, new_instr);
164 jtag_add_ir_scan(tap, &field, end_state);
167 return ERROR_OK;
170 static int xscale_read_dcsr(struct target *target)
172 struct xscale_common *xscale = target_to_xscale(target);
173 int retval;
174 struct scan_field fields[3];
175 uint8_t field0 = 0x0;
176 uint8_t field0_check_value = 0x2;
177 uint8_t field0_check_mask = 0x7;
178 uint8_t field2 = 0x0;
179 uint8_t field2_check_value = 0x0;
180 uint8_t field2_check_mask = 0x1;
182 xscale_jtag_set_instr(target->tap,
183 XSCALE_SELDCSR << xscale->xscale_variant,
184 TAP_DRPAUSE);
186 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
187 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
189 memset(&fields, 0, sizeof(fields));
191 fields[0].num_bits = 3;
192 fields[0].out_value = &field0;
193 uint8_t tmp;
194 fields[0].in_value = &tmp;
196 fields[1].num_bits = 32;
197 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
199 fields[2].num_bits = 1;
200 fields[2].out_value = &field2;
201 uint8_t tmp2;
202 fields[2].in_value = &tmp2;
204 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
206 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
207 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
209 retval = jtag_execute_queue();
210 if (retval != ERROR_OK) {
211 LOG_ERROR("JTAG error while reading DCSR");
212 return retval;
215 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
216 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;
218 /* write the register with the value we just read
219 * on this second pass, only the first bit of field0 is guaranteed to be 0
221 field0_check_mask = 0x1;
222 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
223 fields[1].in_value = NULL;
225 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
227 /* DANGER!!! this must be here. It will make sure that the arguments
228 * to jtag_set_check_value() do not go out of scope! */
229 return jtag_execute_queue();
233 static void xscale_getbuf(jtag_callback_data_t arg)
235 uint8_t *in = (uint8_t *)arg;
236 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
239 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
241 if (num_words == 0)
242 return ERROR_COMMAND_SYNTAX_ERROR;
244 struct xscale_common *xscale = target_to_xscale(target);
245 int retval = ERROR_OK;
246 tap_state_t path[3];
247 struct scan_field fields[3];
248 uint8_t *field0 = malloc(num_words * 1);
249 uint8_t field0_check_value = 0x2;
250 uint8_t field0_check_mask = 0x6;
251 uint32_t *field1 = malloc(num_words * 4);
252 uint8_t field2_check_value = 0x0;
253 uint8_t field2_check_mask = 0x1;
254 int words_done = 0;
255 int words_scheduled = 0;
256 int i;
258 path[0] = TAP_DRSELECT;
259 path[1] = TAP_DRCAPTURE;
260 path[2] = TAP_DRSHIFT;
262 memset(&fields, 0, sizeof(fields));
264 fields[0].num_bits = 3;
265 uint8_t tmp;
266 fields[0].in_value = &tmp;
267 fields[0].check_value = &field0_check_value;
268 fields[0].check_mask = &field0_check_mask;
270 fields[1].num_bits = 32;
272 fields[2].num_bits = 1;
273 uint8_t tmp2;
274 fields[2].in_value = &tmp2;
275 fields[2].check_value = &field2_check_value;
276 fields[2].check_mask = &field2_check_mask;
278 xscale_jtag_set_instr(target->tap,
279 XSCALE_DBGTX << xscale->xscale_variant,
280 TAP_IDLE);
281 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
282 *could be a no-op */
284 /* repeat until all words have been collected */
285 int attempts = 0;
286 while (words_done < num_words) {
287 /* schedule reads */
288 words_scheduled = 0;
289 for (i = words_done; i < num_words; i++) {
290 fields[0].in_value = &field0[i];
292 jtag_add_pathmove(3, path);
294 fields[1].in_value = (uint8_t *)(field1 + i);
296 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
298 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
300 words_scheduled++;
303 retval = jtag_execute_queue();
304 if (retval != ERROR_OK) {
305 LOG_ERROR("JTAG error while receiving data from debug handler");
306 break;
309 /* examine results */
310 for (i = words_done; i < num_words; i++) {
311 if (!(field0[i] & 1)) {
312 /* move backwards if necessary */
313 int j;
314 for (j = i; j < num_words - 1; j++) {
315 field0[j] = field0[j + 1];
316 field1[j] = field1[j + 1];
318 words_scheduled--;
321 if (words_scheduled == 0) {
322 if (attempts++ == 1000) {
323 LOG_ERROR(
324 "Failed to receiving data from debug handler after 1000 attempts");
325 retval = ERROR_TARGET_TIMEOUT;
326 break;
330 words_done += words_scheduled;
333 for (i = 0; i < num_words; i++)
334 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
336 free(field1);
338 return retval;
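/* Illustrative usage (this mirrors xscale_debug_entry() below): on debug entry
 * the handler streams r0, pc, r1-r7 and cpsr, i.e. ten 32-bit words, collected
 * with a single call:
 */
#if 0
	uint32_t entry_regs[10];
	int retval = xscale_receive(target, entry_regs, 10);	/* r0, pc, r1..r7, cpsr */
#endif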
341 static int xscale_read_tx(struct target *target, int consume)
343 struct xscale_common *xscale = target_to_xscale(target);
344 tap_state_t path[3];
345 tap_state_t noconsume_path[6];
346 int retval;
347 struct timeval timeout, now;
348 struct scan_field fields[3];
349 uint8_t field0_in = 0x0;
350 uint8_t field0_check_value = 0x2;
351 uint8_t field0_check_mask = 0x6;
352 uint8_t field2_check_value = 0x0;
353 uint8_t field2_check_mask = 0x1;
355 xscale_jtag_set_instr(target->tap,
356 XSCALE_DBGTX << xscale->xscale_variant,
357 TAP_IDLE);
359 path[0] = TAP_DRSELECT;
360 path[1] = TAP_DRCAPTURE;
361 path[2] = TAP_DRSHIFT;
363 noconsume_path[0] = TAP_DRSELECT;
364 noconsume_path[1] = TAP_DRCAPTURE;
365 noconsume_path[2] = TAP_DREXIT1;
366 noconsume_path[3] = TAP_DRPAUSE;
367 noconsume_path[4] = TAP_DREXIT2;
368 noconsume_path[5] = TAP_DRSHIFT;
370 memset(&fields, 0, sizeof(fields));
372 fields[0].num_bits = 3;
373 fields[0].in_value = &field0_in;
375 fields[1].num_bits = 32;
376 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
378 fields[2].num_bits = 1;
379 uint8_t tmp;
380 fields[2].in_value = &tmp;
382 gettimeofday(&timeout, NULL);
383 timeval_add_time(&timeout, 1, 0);
385 for (;; ) {
386 /* if we want to consume the register content (i.e. clear TX_READY),
387 * we have to go straight from Capture-DR to Shift-DR
388 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
390 if (consume)
391 jtag_add_pathmove(3, path);
392 else
393 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
395 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
397 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
398 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
400 retval = jtag_execute_queue();
401 if (retval != ERROR_OK) {
402 LOG_ERROR("JTAG error while reading TX");
403 return ERROR_TARGET_TIMEOUT;
406 gettimeofday(&now, NULL);
407 if (timeval_compare(&now, &timeout) > 0) {
408 LOG_ERROR("time out reading TX register");
409 return ERROR_TARGET_TIMEOUT;
411 if (!((!(field0_in & 1)) && consume))
412 goto done;
413 if (debug_level >= 3) {
414 LOG_DEBUG("waiting 100ms");
415 alive_sleep(100); /* avoid flooding the logs */
416 } else
417 keep_alive();
419 done:
421 if (!(field0_in & 1))
422 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
424 return ERROR_OK;
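/* Note on 'consume': xscale_poll() below calls xscale_read_tx(target, 0) so
 * that TX_READY is left intact while merely checking whether the debug handler
 * has posted data; a caller that actually wants the TX word passes a non-zero
 * 'consume', taking the straight Capture-DR -> Shift-DR path that clears
 * TX_READY (see the comment in the loop above).
 */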
427 static int xscale_write_rx(struct target *target)
429 struct xscale_common *xscale = target_to_xscale(target);
430 int retval;
431 struct timeval timeout, now;
432 struct scan_field fields[3];
433 uint8_t field0_out = 0x0;
434 uint8_t field0_in = 0x0;
435 uint8_t field0_check_value = 0x2;
436 uint8_t field0_check_mask = 0x6;
437 uint8_t field2 = 0x0;
438 uint8_t field2_check_value = 0x0;
439 uint8_t field2_check_mask = 0x1;
441 xscale_jtag_set_instr(target->tap,
442 XSCALE_DBGRX << xscale->xscale_variant,
443 TAP_IDLE);
445 memset(&fields, 0, sizeof(fields));
447 fields[0].num_bits = 3;
448 fields[0].out_value = &field0_out;
449 fields[0].in_value = &field0_in;
451 fields[1].num_bits = 32;
452 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
454 fields[2].num_bits = 1;
455 fields[2].out_value = &field2;
456 uint8_t tmp;
457 fields[2].in_value = &tmp;
459 gettimeofday(&timeout, NULL);
460 timeval_add_time(&timeout, 1, 0);
462 /* poll until the RX valid flag is low, i.e. the previous word was consumed */
463 LOG_DEBUG("polling RX");
464 for (;;) {
465 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
467 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
468 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
470 retval = jtag_execute_queue();
471 if (retval != ERROR_OK) {
472 LOG_ERROR("JTAG error while writing RX");
473 return retval;
476 gettimeofday(&now, NULL);
477 if ((now.tv_sec > timeout.tv_sec) ||
478 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
479 LOG_ERROR("time out writing RX register");
480 return ERROR_TARGET_TIMEOUT;
482 if (!(field0_in & 1))
483 goto done;
484 if (debug_level >= 3) {
485 LOG_DEBUG("waiting 100ms");
486 alive_sleep(100); /* avoid flooding the logs */
487 } else
488 keep_alive();
490 done:
492 /* set rx_valid */
493 field2 = 0x1;
494 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
496 retval = jtag_execute_queue();
497 if (retval != ERROR_OK) {
498 LOG_ERROR("JTAG error while writing RX");
499 return retval;
502 return ERROR_OK;
505 /* send count elements of size byte to the debug handler */
506 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
508 struct xscale_common *xscale = target_to_xscale(target);
509 int retval;
510 int done_count = 0;
512 xscale_jtag_set_instr(target->tap,
513 XSCALE_DBGRX << xscale->xscale_variant,
514 TAP_IDLE);
516 static const uint8_t t0;
517 uint8_t t1[4] = { 0 };
518 static const uint8_t t2 = 1;
519 struct scan_field fields[3] = {
520 { .num_bits = 3, .out_value = &t0 },
521 { .num_bits = 32, .out_value = t1 },
522 { .num_bits = 1, .out_value = &t2 },
525 int endianness = target->endianness;
526 while (done_count++ < count) {
527 uint32_t t;
529 switch (size) {
530 case 4:
531 if (endianness == TARGET_LITTLE_ENDIAN)
532 t = le_to_h_u32(buffer);
533 else
534 t = be_to_h_u32(buffer);
535 break;
536 case 2:
537 if (endianness == TARGET_LITTLE_ENDIAN)
538 t = le_to_h_u16(buffer);
539 else
540 t = be_to_h_u16(buffer);
541 break;
542 case 1:
543 t = buffer[0];
544 break;
545 default:
546 LOG_ERROR("BUG: size neither 4, 2 nor 1");
547 return ERROR_COMMAND_SYNTAX_ERROR;
550 buf_set_u32(t1, 0, 32, t);
552 jtag_add_dr_scan(target->tap,
554 fields,
555 TAP_IDLE);
556 buffer += size;
559 retval = jtag_execute_queue();
560 if (retval != ERROR_OK) {
561 LOG_ERROR("JTAG error while sending data to debug handler");
562 return retval;
565 return ERROR_OK;
568 static int xscale_send_u32(struct target *target, uint32_t value)
570 struct xscale_common *xscale = target_to_xscale(target);
572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
573 return xscale_write_rx(target);
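/* For reference, the debug-handler command bytes pushed through RX at the call
 * sites later in this file are:
 *   0x30 / 0x31   resume (0x31 when trace collection is enabled)
 *   0x62          clean/prepare the trace buffer before a traced resume
 *   0x60          clear the DCSR Sticky Abort bit after a faulted access
 *   0x10 | size   memory read request,  0x20 | size   memory write request
 *   0x50..0x53    clean D-cache, invalidate D-cache, invalidate I-cache, cpwait
 *   0x0 / 0x1     request / send banked registers for a given mode
 */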
576 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
578 struct xscale_common *xscale = target_to_xscale(target);
579 int retval;
580 struct scan_field fields[3];
581 uint8_t field0 = 0x0;
582 uint8_t field0_check_value = 0x2;
583 uint8_t field0_check_mask = 0x7;
584 uint8_t field2 = 0x0;
585 uint8_t field2_check_value = 0x0;
586 uint8_t field2_check_mask = 0x1;
588 if (hold_rst != -1)
589 xscale->hold_rst = hold_rst;
591 if (ext_dbg_brk != -1)
592 xscale->external_debug_break = ext_dbg_brk;
594 xscale_jtag_set_instr(target->tap,
595 XSCALE_SELDCSR << xscale->xscale_variant,
596 TAP_IDLE);
598 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
599 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
601 memset(&fields, 0, sizeof(fields));
603 fields[0].num_bits = 3;
604 fields[0].out_value = &field0;
605 uint8_t tmp;
606 fields[0].in_value = &tmp;
608 fields[1].num_bits = 32;
609 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
611 fields[2].num_bits = 1;
612 fields[2].out_value = &field2;
613 uint8_t tmp2;
614 fields[2].in_value = &tmp2;
616 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
618 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
619 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
621 retval = jtag_execute_queue();
622 if (retval != ERROR_OK) {
623 LOG_ERROR("JTAG error while writing DCSR");
624 return retval;
627 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
628 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;
630 return ERROR_OK;
633 /* parity of the number of set bits: 0 if even, 1 if odd (for 32-bit words) */
634 static unsigned int parity(unsigned int v)
636 /* unsigned int ov = v; */
637 v ^= v >> 16;
638 v ^= v >> 8;
639 v ^= v >> 4;
640 v &= 0xf;
641 /* LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1); */
642 return (0x6996 >> v) & 1;
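/* Worked example: v = 0x80000001 has two bits set (even parity).
 * 0x80000001 ^ 0x00008000 = 0x80008001; ^ 0x00800080 = 0x80808081;
 * ^ 0x08080808 = 0x88888889; & 0xf = 9; (0x6996 >> 9) & 1 = 0.
 * 0x6996 is just the 16-entry nibble parity table packed into one constant.
 */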
645 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
647 struct xscale_common *xscale = target_to_xscale(target);
648 uint8_t packet[4] = { 0 };
649 uint8_t cmd = 0;
650 int word;
651 struct scan_field fields[2];
653 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
655 /* LDIC into IR */
656 xscale_jtag_set_instr(target->tap,
657 XSCALE_LDIC << xscale->xscale_variant,
658 TAP_IDLE);
660 /* CMD is b011 to load a cacheline into the Mini ICache.
661 * Loading into the main ICache is deprecated, and unused.
662 * It's followed by three zero bits, and 27 address bits.
664 buf_set_u32(&cmd, 0, 6, 0x3);
666 /* virtual address of desired cache line */
667 buf_set_u32(packet, 0, 27, va >> 5);
669 memset(&fields, 0, sizeof(fields));
671 fields[0].num_bits = 6;
672 fields[0].out_value = &cmd;
674 fields[1].num_bits = 27;
675 fields[1].out_value = packet;
677 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
679 /* rest of packet is a cacheline: 8 instructions, with parity */
680 fields[0].num_bits = 32;
681 fields[0].out_value = packet;
683 fields[1].num_bits = 1;
684 fields[1].out_value = &cmd;
686 for (word = 0; word < 8; word++) {
687 buf_set_u32(packet, 0, 32, buffer[word]);
689 uint32_t value;
690 memcpy(&value, packet, sizeof(uint32_t));
691 cmd = parity(value);
693 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
696 return jtag_execute_queue();
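/* Illustrative usage (this is what xscale_update_vectors() below does): the
 * eight low exception vectors form exactly one 32-byte cache line, so they can
 * be reloaded with a single call after invalidating the stale line:
 */
#if 0
	xscale_invalidate_ic_line(target, 0x0);
	xscale_load_ic(target, 0x0, xscale->low_vectors);
#endif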
699 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
701 struct xscale_common *xscale = target_to_xscale(target);
702 uint8_t packet[4] = { 0 };
703 uint8_t cmd = 0;
704 struct scan_field fields[2];
706 xscale_jtag_set_instr(target->tap,
707 XSCALE_LDIC << xscale->xscale_variant,
708 TAP_IDLE);
710 /* CMD to invalidate an IC line is b000; bits [6:4] are b000 as well */
711 buf_set_u32(&cmd, 0, 6, 0x0);
713 /* virtual address of desired cache line */
714 buf_set_u32(packet, 0, 27, va >> 5);
716 memset(&fields, 0, sizeof(fields));
718 fields[0].num_bits = 6;
719 fields[0].out_value = &cmd;
721 fields[1].num_bits = 27;
722 fields[1].out_value = packet;
724 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
726 return ERROR_OK;
729 static int xscale_update_vectors(struct target *target)
731 struct xscale_common *xscale = target_to_xscale(target);
732 int i;
733 int retval;
735 uint32_t low_reset_branch, high_reset_branch;
737 for (i = 1; i < 8; i++) {
738 /* if there's a static vector specified for this exception, override */
739 if (xscale->static_high_vectors_set & (1 << i))
740 xscale->high_vectors[i] = xscale->static_high_vectors[i];
741 else {
742 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
743 if (retval == ERROR_TARGET_TIMEOUT)
744 return retval;
745 if (retval != ERROR_OK) {
746 /* Some of these reads will fail as part of normal execution */
747 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
752 for (i = 1; i < 8; i++) {
753 if (xscale->static_low_vectors_set & (1 << i))
754 xscale->low_vectors[i] = xscale->static_low_vectors[i];
755 else {
756 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
757 if (retval == ERROR_TARGET_TIMEOUT)
758 return retval;
759 if (retval != ERROR_OK) {
760 /* Some of these reads will fail as part of normal execution */
761 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
766 /* calculate branches to debug handler */
767 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
768 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
770 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
771 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
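/* Worked example with a purely hypothetical handler_address of 0xfe000800:
 * the reset vector must branch to handler_address + 0x20, and ARM B offsets
 * are relative to PC + 8 and counted in words, so
 *   low_reset_branch = (0xfe000800 + 0x20 - 0x0 - 0x8) >> 2 = 0x3f800206
 * and ARMV4_5_B() encodes the low 24 bits (0x800206) into the instruction.
 */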
773 /* invalidate and load exception vectors in mini i-cache */
774 xscale_invalidate_ic_line(target, 0x0);
775 xscale_invalidate_ic_line(target, 0xffff0000);
777 xscale_load_ic(target, 0x0, xscale->low_vectors);
778 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
780 return ERROR_OK;
783 static int xscale_arch_state(struct target *target)
785 struct xscale_common *xscale = target_to_xscale(target);
786 struct arm *arm = &xscale->arm;
788 static const char *state[] = {
789 "disabled", "enabled"
792 static const char *arch_dbg_reason[] = {
793 "", "\n(processor reset)", "\n(trace buffer full)"
796 if (arm->common_magic != ARM_COMMON_MAGIC) {
797 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
798 return ERROR_COMMAND_SYNTAX_ERROR;
801 arm_arch_state(target);
802 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
803 state[xscale->armv4_5_mmu.mmu_enabled],
804 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
805 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
806 arch_dbg_reason[xscale->arch_debug_reason]);
808 return ERROR_OK;
811 static int xscale_poll(struct target *target)
813 int retval = ERROR_OK;
815 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
816 enum target_state previous_state = target->state;
817 retval = xscale_read_tx(target, 0);
818 if (retval == ERROR_OK) {
820 /* there's data to read from the tx register, we entered debug state */
821 target->state = TARGET_HALTED;
823 /* process debug entry, fetching current mode regs */
824 retval = xscale_debug_entry(target);
825 } else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
826 LOG_USER("error while polling TX register; please reset the CPU");
827 /* here we "lie" so GDB won't get stuck and a reset can be performed */
828 target->state = TARGET_HALTED;
831 /* debug_entry could have overwritten target state (i.e. immediate resume)
832 * don't signal event handlers in that case
834 if (target->state != TARGET_HALTED)
835 return ERROR_OK;
837 /* if target was running, signal that we halted
838 * otherwise we reentered from debug execution */
839 if (previous_state == TARGET_RUNNING)
840 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
841 else
842 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
845 return retval;
848 static int xscale_debug_entry(struct target *target)
850 struct xscale_common *xscale = target_to_xscale(target);
851 struct arm *arm = &xscale->arm;
852 uint32_t pc;
853 uint32_t buffer[10];
854 unsigned i;
855 int retval;
856 uint32_t moe;
858 /* clear external dbg break (will be written on next DCSR read) */
859 xscale->external_debug_break = 0;
860 retval = xscale_read_dcsr(target);
861 if (retval != ERROR_OK)
862 return retval;
864 /* get r0, pc, r1 to r7 and cpsr */
865 retval = xscale_receive(target, buffer, 10);
866 if (retval != ERROR_OK)
867 return retval;
869 /* move r0 from buffer to register cache */
870 buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
871 arm->core_cache->reg_list[0].dirty = true;
872 arm->core_cache->reg_list[0].valid = true;
873 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
875 /* move pc from buffer to register cache */
876 buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
877 arm->pc->dirty = true;
878 arm->pc->valid = true;
879 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
881 /* move data from buffer to register cache */
882 for (i = 1; i <= 7; i++) {
883 buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
884 arm->core_cache->reg_list[i].dirty = true;
885 arm->core_cache->reg_list[i].valid = true;
886 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
889 arm_set_cpsr(arm, buffer[9]);
890 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
892 if (!is_arm_mode(arm->core_mode)) {
893 target->state = TARGET_UNKNOWN;
894 LOG_ERROR("cpsr contains invalid mode value - communication failure");
895 return ERROR_TARGET_FAILURE;
897 LOG_DEBUG("target entered debug state in %s mode",
898 arm_mode_name(arm->core_mode));
900 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
901 if (arm->spsr) {
902 xscale_receive(target, buffer, 8);
903 buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
904 arm->spsr->dirty = false;
905 arm->spsr->valid = true;
906 } else {
907 /* r8 to r14, but no spsr */
908 xscale_receive(target, buffer, 7);
911 /* move data from buffer to right banked register in cache */
912 for (i = 8; i <= 14; i++) {
913 struct reg *r = arm_reg_current(arm, i);
915 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
916 r->dirty = false;
917 r->valid = true;
920 /* mark xscale regs invalid to ensure they are retrieved from the
921 * debug handler if requested */
922 for (i = 0; i < xscale->reg_cache->num_regs; i++)
923 xscale->reg_cache->reg_list[i].valid = false;
925 /* examine debug reason */
926 xscale_read_dcsr(target);
927 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
929 /* stored PC (for calculating fixup) */
930 pc = buf_get_u32(arm->pc->value, 0, 32);
932 switch (moe) {
933 case 0x0: /* Processor reset */
934 target->debug_reason = DBG_REASON_DBGRQ;
935 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
936 pc -= 4;
937 break;
938 case 0x1: /* Instruction breakpoint hit */
939 target->debug_reason = DBG_REASON_BREAKPOINT;
940 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
941 pc -= 4;
942 break;
943 case 0x2: /* Data breakpoint hit */
944 target->debug_reason = DBG_REASON_WATCHPOINT;
945 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
946 pc -= 4;
947 break;
948 case 0x3: /* BKPT instruction executed */
949 target->debug_reason = DBG_REASON_BREAKPOINT;
950 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
951 pc -= 4;
952 break;
953 case 0x4: /* Ext. debug event */
954 target->debug_reason = DBG_REASON_DBGRQ;
955 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
956 pc -= 4;
957 break;
958 case 0x5: /* Vector trap occurred */
959 target->debug_reason = DBG_REASON_BREAKPOINT;
960 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
961 pc -= 4;
962 break;
963 case 0x6: /* Trace buffer full break */
964 target->debug_reason = DBG_REASON_DBGRQ;
965 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
966 pc -= 4;
967 break;
968 case 0x7: /* Reserved (may flag Hot-Debug support) */
969 default:
970 LOG_ERROR("Method of Entry is 'Reserved'");
971 exit(-1);
972 break;
975 /* apply PC fixup */
976 buf_set_u32(arm->pc->value, 0, 32, pc);
978 /* on the first debug entry, identify cache type */
979 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
980 uint32_t cache_type_reg;
982 /* read cp15 cache type register */
983 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
984 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
986 32);
988 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
991 /* examine MMU and Cache settings
992 * read cp15 control register */
993 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
994 xscale->cp15_control_reg =
995 buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
996 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
997 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
998 (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
999 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1000 (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
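/* Example decode (value chosen only for illustration): a cp15 control value of
 * 0x00001005 yields mmu_enabled = 1 (bit 0), d_u_cache_enabled = 1 (bit 2) and
 * i_cache_enabled = 1 (bit 12), matching the masks used above.
 */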
1002 /* tracing enabled, read collected trace data */
1003 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1004 xscale_read_trace(target);
1006 /* Resume if entered debug due to buffer fill and we're still collecting
1007 * trace data. Note that a debug exception due to trace buffer full
1008 * can only happen in fill mode. */
1009 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
1010 if (--xscale->trace.fill_counter > 0)
1011 xscale_resume(target, 1, 0x0, 1, 0);
1012 } else /* entered debug for other reason; reset counter */
1013 xscale->trace.fill_counter = 0;
1016 return ERROR_OK;
1019 static int xscale_halt(struct target *target)
1021 struct xscale_common *xscale = target_to_xscale(target);
1023 LOG_DEBUG("target->state: %s",
1024 target_state_name(target));
1026 if (target->state == TARGET_HALTED) {
1027 LOG_DEBUG("target was already halted");
1028 return ERROR_OK;
1029 } else if (target->state == TARGET_UNKNOWN) {
1030 /* this must not happen for an XScale target */
1031 LOG_ERROR("target was in unknown state when halt was requested");
1032 return ERROR_TARGET_INVALID;
1033 } else if (target->state == TARGET_RESET)
1034 LOG_DEBUG("target->state == TARGET_RESET");
1035 else {
1036 /* assert external dbg break */
1037 xscale->external_debug_break = 1;
1038 xscale_read_dcsr(target);
1040 target->debug_reason = DBG_REASON_DBGRQ;
1043 return ERROR_OK;
1046 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1048 struct xscale_common *xscale = target_to_xscale(target);
1049 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1050 int retval;
1052 if (xscale->ibcr0_used) {
1053 struct breakpoint *ibcr0_bp =
1054 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1056 if (ibcr0_bp)
1057 xscale_unset_breakpoint(target, ibcr0_bp);
1058 else {
1059 LOG_ERROR(
1060 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1061 exit(-1);
1065 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1066 if (retval != ERROR_OK)
1067 return retval;
1069 return ERROR_OK;
1072 static int xscale_disable_single_step(struct target *target)
1074 struct xscale_common *xscale = target_to_xscale(target);
1075 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1076 int retval;
1078 retval = xscale_set_reg_u32(ibcr0, 0x0);
1079 if (retval != ERROR_OK)
1080 return retval;
1082 return ERROR_OK;
1085 static void xscale_enable_watchpoints(struct target *target)
1087 struct watchpoint *watchpoint = target->watchpoints;
1089 while (watchpoint) {
1090 if (watchpoint->set == 0)
1091 xscale_set_watchpoint(target, watchpoint);
1092 watchpoint = watchpoint->next;
1096 static void xscale_enable_breakpoints(struct target *target)
1098 struct breakpoint *breakpoint = target->breakpoints;
1100 /* set any pending breakpoints */
1101 while (breakpoint) {
1102 if (breakpoint->set == 0)
1103 xscale_set_breakpoint(target, breakpoint);
1104 breakpoint = breakpoint->next;
1108 static void xscale_free_trace_data(struct xscale_common *xscale)
1110 struct xscale_trace_data *td = xscale->trace.data;
1111 while (td) {
1112 struct xscale_trace_data *next_td = td->next;
1113 free(td->entries);
1114 free(td);
1115 td = next_td;
1117 xscale->trace.data = NULL;
1120 static int xscale_resume(struct target *target, int current,
1121 target_addr_t address, int handle_breakpoints, int debug_execution)
1123 struct xscale_common *xscale = target_to_xscale(target);
1124 struct arm *arm = &xscale->arm;
1125 uint32_t current_pc;
1126 int retval;
1127 int i;
1129 LOG_DEBUG("-");
1131 if (target->state != TARGET_HALTED) {
1132 LOG_WARNING("target not halted");
1133 return ERROR_TARGET_NOT_HALTED;
1136 if (!debug_execution)
1137 target_free_all_working_areas(target);
1139 /* update vector tables */
1140 retval = xscale_update_vectors(target);
1141 if (retval != ERROR_OK)
1142 return retval;
1144 /* current = 1: continue on current pc, otherwise continue at <address> */
1145 if (!current)
1146 buf_set_u32(arm->pc->value, 0, 32, address);
1148 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1150 /* if we're at the reset vector, we have to simulate the branch */
1151 if (current_pc == 0x0) {
1152 arm_simulate_step(target, NULL);
1153 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1156 /* the front-end may request us not to handle breakpoints */
1157 if (handle_breakpoints) {
1158 struct breakpoint *breakpoint;
1159 breakpoint = breakpoint_find(target,
1160 buf_get_u32(arm->pc->value, 0, 32));
1161 if (breakpoint) {
1162 uint32_t next_pc;
1163 enum trace_mode saved_trace_mode;
1165 /* there's a breakpoint at the current PC, we have to step over it */
1166 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT "",
1167 breakpoint->address);
1168 xscale_unset_breakpoint(target, breakpoint);
1170 /* calculate PC of next instruction */
1171 retval = arm_simulate_step(target, &next_pc);
1172 if (retval != ERROR_OK) {
1173 uint32_t current_opcode;
1174 target_read_u32(target, current_pc, &current_opcode);
1175 LOG_ERROR(
1176 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1177 current_opcode);
1180 LOG_DEBUG("enable single-step");
1181 xscale_enable_single_step(target, next_pc);
1183 /* restore banked registers */
1184 retval = xscale_restore_banked(target);
1185 if (retval != ERROR_OK)
1186 return retval;
1188 /* send resume request */
1189 xscale_send_u32(target, 0x30);
1191 /* send CPSR */
1192 xscale_send_u32(target,
1193 buf_get_u32(arm->cpsr->value, 0, 32));
1194 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1195 buf_get_u32(arm->cpsr->value, 0, 32));
1197 for (i = 7; i >= 0; i--) {
1198 /* send register */
1199 xscale_send_u32(target,
1200 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1201 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1202 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1205 /* send PC */
1206 xscale_send_u32(target,
1207 buf_get_u32(arm->pc->value, 0, 32));
1208 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1209 buf_get_u32(arm->pc->value, 0, 32));
1211 /* disable trace data collection in xscale_debug_entry() */
1212 saved_trace_mode = xscale->trace.mode;
1213 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1215 /* wait for and process debug entry */
1216 xscale_debug_entry(target);
1218 /* re-enable trace buffer, if enabled previously */
1219 xscale->trace.mode = saved_trace_mode;
1221 LOG_DEBUG("disable single-step");
1222 xscale_disable_single_step(target);
1224 LOG_DEBUG("set breakpoint at " TARGET_ADDR_FMT "",
1225 breakpoint->address);
1226 xscale_set_breakpoint(target, breakpoint);
1230 /* enable any pending breakpoints and watchpoints */
1231 xscale_enable_breakpoints(target);
1232 xscale_enable_watchpoints(target);
1234 /* restore banked registers */
1235 retval = xscale_restore_banked(target);
1236 if (retval != ERROR_OK)
1237 return retval;
1239 /* send resume request (command 0x30 or 0x31)
1240 * clean the trace buffer if it is to be enabled (0x62) */
1241 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1242 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1243 /* If trace enabled in fill mode and starting collection of new set
1244 * of buffers, initialize buffer counter and free previous buffers */
1245 if (xscale->trace.fill_counter == 0) {
1246 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1247 xscale_free_trace_data(xscale);
1249 } else /* wrap mode; free previous buffer */
1250 xscale_free_trace_data(xscale);
1252 xscale_send_u32(target, 0x62);
1253 xscale_send_u32(target, 0x31);
1254 } else
1255 xscale_send_u32(target, 0x30);
1257 /* send CPSR */
1258 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1259 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1260 buf_get_u32(arm->cpsr->value, 0, 32));
1262 for (i = 7; i >= 0; i--) {
1263 /* send register */
1264 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1265 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1266 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1269 /* send PC */
1270 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1271 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1272 buf_get_u32(arm->pc->value, 0, 32));
1274 target->debug_reason = DBG_REASON_NOTHALTED;
1276 if (!debug_execution) {
1277 /* registers are now invalid */
1278 register_cache_invalidate(arm->core_cache);
1279 target->state = TARGET_RUNNING;
1280 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1281 } else {
1282 target->state = TARGET_DEBUG_RUNNING;
1283 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1286 LOG_DEBUG("target resumed");
1288 return ERROR_OK;
1291 static int xscale_step_inner(struct target *target, int current,
1292 uint32_t address, int handle_breakpoints)
1294 struct xscale_common *xscale = target_to_xscale(target);
1295 struct arm *arm = &xscale->arm;
1296 uint32_t next_pc;
1297 int retval;
1298 int i;
1300 target->debug_reason = DBG_REASON_SINGLESTEP;
1302 /* calculate PC of next instruction */
1303 retval = arm_simulate_step(target, &next_pc);
1304 if (retval != ERROR_OK) {
1305 uint32_t current_opcode, current_pc;
1306 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1308 target_read_u32(target, current_pc, &current_opcode);
1309 LOG_ERROR(
1310 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1311 current_opcode);
1312 return retval;
1315 LOG_DEBUG("enable single-step");
1316 retval = xscale_enable_single_step(target, next_pc);
1317 if (retval != ERROR_OK)
1318 return retval;
1320 /* restore banked registers */
1321 retval = xscale_restore_banked(target);
1322 if (retval != ERROR_OK)
1323 return retval;
1325 /* send resume request (command 0x30 or 0x31)
1326 * clean the trace buffer if it is to be enabled (0x62) */
1327 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1328 retval = xscale_send_u32(target, 0x62);
1329 if (retval != ERROR_OK)
1330 return retval;
1331 retval = xscale_send_u32(target, 0x31);
1332 if (retval != ERROR_OK)
1333 return retval;
1334 } else {
1335 retval = xscale_send_u32(target, 0x30);
1336 if (retval != ERROR_OK)
1337 return retval;
1340 /* send CPSR */
1341 retval = xscale_send_u32(target,
1342 buf_get_u32(arm->cpsr->value, 0, 32));
1343 if (retval != ERROR_OK)
1344 return retval;
1345 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1346 buf_get_u32(arm->cpsr->value, 0, 32));
1348 for (i = 7; i >= 0; i--) {
1349 /* send register */
1350 retval = xscale_send_u32(target,
1351 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1352 if (retval != ERROR_OK)
1353 return retval;
1354 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
1355 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1358 /* send PC */
1359 retval = xscale_send_u32(target,
1360 buf_get_u32(arm->pc->value, 0, 32));
1361 if (retval != ERROR_OK)
1362 return retval;
1363 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1364 buf_get_u32(arm->pc->value, 0, 32));
1366 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1368 /* registers are now invalid */
1369 register_cache_invalidate(arm->core_cache);
1371 /* wait for and process debug entry */
1372 retval = xscale_debug_entry(target);
1373 if (retval != ERROR_OK)
1374 return retval;
1376 LOG_DEBUG("disable single-step");
1377 retval = xscale_disable_single_step(target);
1378 if (retval != ERROR_OK)
1379 return retval;
1381 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1383 return ERROR_OK;
1386 static int xscale_step(struct target *target, int current,
1387 target_addr_t address, int handle_breakpoints)
1389 struct arm *arm = target_to_arm(target);
1390 struct breakpoint *breakpoint = NULL;
1392 uint32_t current_pc;
1393 int retval;
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target not halted");
1397 return ERROR_TARGET_NOT_HALTED;
1400 /* current = 1: continue on current pc, otherwise continue at <address> */
1401 if (!current)
1402 buf_set_u32(arm->pc->value, 0, 32, address);
1404 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1406 /* if we're at the reset vector, we have to simulate the step */
1407 if (current_pc == 0x0) {
1408 retval = arm_simulate_step(target, NULL);
1409 if (retval != ERROR_OK)
1410 return retval;
1411 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1412 LOG_DEBUG("current pc %" PRIx32, current_pc);
1414 target->debug_reason = DBG_REASON_SINGLESTEP;
1415 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1417 return ERROR_OK;
1420 /* the front-end may request us not to handle breakpoints */
1421 if (handle_breakpoints)
1422 breakpoint = breakpoint_find(target,
1423 buf_get_u32(arm->pc->value, 0, 32));
1424 if (breakpoint) {
1425 retval = xscale_unset_breakpoint(target, breakpoint);
1426 if (retval != ERROR_OK)
1427 return retval;
1430 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1431 if (retval != ERROR_OK)
1432 return retval;
1434 if (breakpoint)
1435 xscale_set_breakpoint(target, breakpoint);
1437 LOG_DEBUG("target stepped");
1439 return ERROR_OK;
1443 static int xscale_assert_reset(struct target *target)
1445 struct xscale_common *xscale = target_to_xscale(target);
1447 /* TODO: apply hw reset signal while the target is in the not-examined state */
1448 if (!(target_was_examined(target))) {
1449 LOG_WARNING("Reset is not asserted because the target is not examined.");
1450 LOG_WARNING("Use a reset button or power cycle the target.");
1451 return ERROR_TARGET_NOT_EXAMINED;
1454 LOG_DEBUG("target->state: %s",
1455 target_state_name(target));
1457 /* assert reset */
1458 jtag_add_reset(0, 1);
1460 /* sleep 1ms, to be sure we fulfill any requirements */
1461 jtag_add_sleep(1000);
1462 jtag_execute_queue();
1464 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1465 * end up in T-L-R, which would reset JTAG)
1467 xscale_jtag_set_instr(target->tap,
1468 XSCALE_SELDCSR << xscale->xscale_variant,
1469 TAP_IDLE);
1471 /* set Hold reset, Halt mode and Trap Reset */
1472 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1473 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1474 xscale_write_dcsr(target, 1, 0);
1476 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1477 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1478 jtag_execute_queue();
1480 target->state = TARGET_RESET;
1482 if (target->reset_halt) {
1483 int retval = target_halt(target);
1484 if (retval != ERROR_OK)
1485 return retval;
1488 return ERROR_OK;
1491 static int xscale_deassert_reset(struct target *target)
1493 struct xscale_common *xscale = target_to_xscale(target);
1494 struct breakpoint *breakpoint = target->breakpoints;
1496 LOG_DEBUG("-");
1498 xscale->ibcr_available = 2;
1499 xscale->ibcr0_used = 0;
1500 xscale->ibcr1_used = 0;
1502 xscale->dbr_available = 2;
1503 xscale->dbr0_used = 0;
1504 xscale->dbr1_used = 0;
1506 /* mark all hardware breakpoints as unset */
1507 while (breakpoint) {
1508 if (breakpoint->type == BKPT_HARD)
1509 breakpoint->set = 0;
1510 breakpoint = breakpoint->next;
1513 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1514 xscale_free_trace_data(xscale);
1516 register_cache_invalidate(xscale->arm.core_cache);
1518 /* FIXME: mark hardware watchpoints as unset too. Also,
1519 * at least some of the XScale registers are invalid...
1523 * REVISIT: this *assumes* we had an SRST+TRST reset so the mini-icache
1524 * contents got invalidated. Safer to force that, so writing new
1525 * contents can't ever fail.
1528 uint32_t address;
1529 unsigned buf_cnt;
1530 const uint8_t *buffer = xscale_debug_handler;
1531 int retval;
1533 /* release SRST */
1534 jtag_add_reset(0, 0);
1536 /* wait 300ms; 150 and 100ms were not enough */
1537 jtag_add_sleep(300*1000);
1539 jtag_add_runtest(2030, TAP_IDLE);
1540 jtag_execute_queue();
1542 /* set Hold reset, Halt mode and Trap Reset */
1543 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1544 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1545 xscale_write_dcsr(target, 1, 0);
1547 /* Load the debug handler into the mini-icache. Since
1548 * it's using halt mode (not monitor mode), it runs in
1549 * "Special Debug State" for access to registers, memory,
1550 * coprocessors, trace data, etc.
1552 address = xscale->handler_address;
1553 for (unsigned binary_size = sizeof(xscale_debug_handler);
1554 binary_size > 0;
1555 binary_size -= buf_cnt, buffer += buf_cnt) {
1556 uint32_t cache_line[8];
1557 unsigned i;
1559 buf_cnt = binary_size;
1560 if (buf_cnt > 32)
1561 buf_cnt = 32;
1563 for (i = 0; i < buf_cnt; i += 4) {
1564 /* convert LE buffer to host-endian uint32_t */
1565 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1568 for (; i < 32; i += 4)
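/* pad the rest of the cache line with 0xe1a08008 ("mov r8, r8", a harmless NOP) */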
1569 cache_line[i / 4] = 0xe1a08008;
1571 /* only load addresses other than the reset vectors */
1572 if ((address % 0x400) != 0x0) {
1573 retval = xscale_load_ic(target, address,
1574 cache_line);
1575 if (retval != ERROR_OK)
1576 return retval;
1579 address += buf_cnt;
1582 retval = xscale_load_ic(target, 0x0,
1583 xscale->low_vectors);
1584 if (retval != ERROR_OK)
1585 return retval;
1586 retval = xscale_load_ic(target, 0xffff0000,
1587 xscale->high_vectors);
1588 if (retval != ERROR_OK)
1589 return retval;
1591 jtag_add_runtest(30, TAP_IDLE);
1593 jtag_add_sleep(100000);
1595 /* set Hold reset, Halt mode and Trap Reset */
1596 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1597 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1598 xscale_write_dcsr(target, 1, 0);
1600 /* clear Hold reset to let the target run (should enter debug handler) */
1601 xscale_write_dcsr(target, 0, 1);
1602 target->state = TARGET_RUNNING;
1604 if (!target->reset_halt) {
1605 jtag_add_sleep(10000);
1607 /* we should have entered debug now */
1608 xscale_debug_entry(target);
1609 target->state = TARGET_HALTED;
1611 /* resume the target */
1612 xscale_resume(target, 1, 0x0, 1, 0);
1616 return ERROR_OK;
1619 static int xscale_read_core_reg(struct target *target, struct reg *r,
1620 int num, enum arm_mode mode)
1622 /** \todo add debug handler support for core register reads */
1623 LOG_ERROR("not implemented");
1624 return ERROR_OK;
1627 static int xscale_write_core_reg(struct target *target, struct reg *r,
1628 int num, enum arm_mode mode, uint8_t *value)
1630 /** \todo add debug handler support for core register writes */
1631 LOG_ERROR("not implemented");
1632 return ERROR_OK;
1635 static int xscale_full_context(struct target *target)
1637 struct arm *arm = target_to_arm(target);
1639 uint32_t *buffer;
1641 int i, j;
1643 LOG_DEBUG("-");
1645 if (target->state != TARGET_HALTED) {
1646 LOG_WARNING("target not halted");
1647 return ERROR_TARGET_NOT_HALTED;
1650 buffer = malloc(4 * 8);
1652 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1653 * we can't enter User mode on an XScale (unpredictable),
1654 * but User shares registers with SYS
1656 for (i = 1; i < 7; i++) {
1657 enum arm_mode mode = armv4_5_number_to_mode(i);
1658 bool valid = true;
1659 struct reg *r;
1661 if (mode == ARM_MODE_USR)
1662 continue;
1664 /* check if there are invalid registers in the current mode
1666 for (j = 0; valid && j <= 16; j++) {
1667 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1668 mode, j).valid)
1669 valid = false;
1671 if (valid)
1672 continue;
1674 /* request banked registers */
1675 xscale_send_u32(target, 0x0);
1677 /* send CPSR for desired bank mode */
1678 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1680 /* get banked registers: r8 to r14; and SPSR
1681 * except in USR/SYS mode
1683 if (mode != ARM_MODE_SYS) {
1684 /* SPSR */
1685 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1686 mode, 16);
1688 xscale_receive(target, buffer, 8);
1690 buf_set_u32(r->value, 0, 32, buffer[7]);
1691 r->dirty = false;
1692 r->valid = true;
1693 } else
1694 xscale_receive(target, buffer, 7);
1696 /* move data from buffer to register cache */
1697 for (j = 8; j <= 14; j++) {
1698 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1699 mode, j);
1701 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1702 r->dirty = false;
1703 r->valid = true;
1707 free(buffer);
1709 return ERROR_OK;
1712 static int xscale_restore_banked(struct target *target)
1714 struct arm *arm = target_to_arm(target);
1716 int i, j;
1718 if (target->state != TARGET_HALTED) {
1719 LOG_WARNING("target not halted");
1720 return ERROR_TARGET_NOT_HALTED;
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * and check if any banked registers need to be written. Ignore
1725 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1726 * an XScale (unpredictable), but they share all registers.
1728 for (i = 1; i < 7; i++) {
1729 enum arm_mode mode = armv4_5_number_to_mode(i);
1730 struct reg *r;
1732 if (mode == ARM_MODE_USR)
1733 continue;
1735 /* check if there are dirty registers in this mode */
1736 for (j = 8; j <= 14; j++) {
1737 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1738 mode, j).dirty)
1739 goto dirty;
1742 /* if not USR/SYS, check if the SPSR needs to be written */
1743 if (mode != ARM_MODE_SYS) {
1744 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1745 mode, 16).dirty)
1746 goto dirty;
1749 /* there's nothing to flush for this mode */
1750 continue;
1752 dirty:
1753 /* command 0x1: "send banked registers" */
1754 xscale_send_u32(target, 0x1);
1756 /* send CPSR for desired mode */
1757 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1759 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1760 * but this protocol doesn't understand that nuance.
1762 for (j = 8; j <= 14; j++) {
1763 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1764 mode, j);
1765 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1766 r->dirty = false;
1769 /* send spsr if not in USR/SYS mode */
1770 if (mode != ARM_MODE_SYS) {
1771 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1772 mode, 16);
1773 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1774 r->dirty = false;
1778 return ERROR_OK;
1781 static int xscale_read_memory(struct target *target, target_addr_t address,
1782 uint32_t size, uint32_t count, uint8_t *buffer)
1784 struct xscale_common *xscale = target_to_xscale(target);
1785 uint32_t *buf32;
1786 uint32_t i;
1787 int retval;
1789 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1790 address,
1791 size,
1792 count);
1794 if (target->state != TARGET_HALTED) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1799 /* sanitize arguments */
1800 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1801 return ERROR_COMMAND_SYNTAX_ERROR;
1803 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1804 return ERROR_TARGET_UNALIGNED_ACCESS;
1806 /* send memory read request (command 0x1n, n: access size) */
1807 retval = xscale_send_u32(target, 0x10 | size);
1808 if (retval != ERROR_OK)
1809 return retval;
1811 /* send base address for read request */
1812 retval = xscale_send_u32(target, address);
1813 if (retval != ERROR_OK)
1814 return retval;
1816 /* send number of requested data words */
1817 retval = xscale_send_u32(target, count);
1818 if (retval != ERROR_OK)
1819 return retval;
1821 /* receive data from target (count times 32-bit words in host endianness) */
1822 buf32 = malloc(4 * count);
1823 retval = xscale_receive(target, buf32, count);
1824 if (retval != ERROR_OK) {
1825 free(buf32);
1826 return retval;
1829 /* extract data from host-endian buffer into byte stream */
1830 for (i = 0; i < count; i++) {
1831 switch (size) {
1832 case 4:
1833 target_buffer_set_u32(target, buffer, buf32[i]);
1834 buffer += 4;
1835 break;
1836 case 2:
1837 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1838 buffer += 2;
1839 break;
1840 case 1:
1841 *buffer++ = buf32[i] & 0xff;
1842 break;
1843 default:
1844 LOG_ERROR("invalid read size");
1845 return ERROR_COMMAND_SYNTAX_ERROR;
1849 free(buf32);
1851 /* examine DCSR, to see if Sticky Abort (SA) got set */
1852 retval = xscale_read_dcsr(target);
1853 if (retval != ERROR_OK)
1854 return retval;
1855 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1856 /* clear SA bit */
1857 retval = xscale_send_u32(target, 0x60);
1858 if (retval != ERROR_OK)
1859 return retval;
1861 return ERROR_TARGET_DATA_ABORT;
1864 return ERROR_OK;
1867 static int xscale_read_phys_memory(struct target *target, target_addr_t address,
1868 uint32_t size, uint32_t count, uint8_t *buffer)
1870 struct xscale_common *xscale = target_to_xscale(target);
1872 /* with MMU inactive, there are only physical addresses */
1873 if (!xscale->armv4_5_mmu.mmu_enabled)
1874 return xscale_read_memory(target, address, size, count, buffer);
1876 /** \todo: provide a non-stub implementation of this routine. */
1877 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1878 target_name(target), __func__);
1879 return ERROR_FAIL;
1882 static int xscale_write_memory(struct target *target, target_addr_t address,
1883 uint32_t size, uint32_t count, const uint8_t *buffer)
1885 struct xscale_common *xscale = target_to_xscale(target);
1886 int retval;
1888 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1889 address,
1890 size,
1891 count);
1893 if (target->state != TARGET_HALTED) {
1894 LOG_WARNING("target not halted");
1895 return ERROR_TARGET_NOT_HALTED;
1898 /* sanitize arguments */
1899 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1900 return ERROR_COMMAND_SYNTAX_ERROR;
1902 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1903 return ERROR_TARGET_UNALIGNED_ACCESS;
1905 /* send memory write request (command 0x2n, n: access size) */
1906 retval = xscale_send_u32(target, 0x20 | size);
1907 if (retval != ERROR_OK)
1908 return retval;
1910 /* send base address for write request */
1911 retval = xscale_send_u32(target, address);
1912 if (retval != ERROR_OK)
1913 return retval;
1915 /* send number of requested data words to be written */
1916 retval = xscale_send_u32(target, count);
1917 if (retval != ERROR_OK)
1918 return retval;
1920 /* extract data from host-endian buffer into byte stream */
1921 #if 0
1922 for (i = 0; i < count; i++) {
1923 switch (size) {
1924 case 4:
1925 value = target_buffer_get_u32(target, buffer);
1926 xscale_send_u32(target, value);
1927 buffer += 4;
1928 break;
1929 case 2:
1930 value = target_buffer_get_u16(target, buffer);
1931 xscale_send_u32(target, value);
1932 buffer += 2;
1933 break;
1934 case 1:
1935 value = *buffer;
1936 xscale_send_u32(target, value);
1937 buffer += 1;
1938 break;
1939 default:
1940 LOG_ERROR("should never get here");
1941 exit(-1);
1944 #endif
1945 retval = xscale_send(target, buffer, count, size);
1946 if (retval != ERROR_OK)
1947 return retval;
1949 /* examine DCSR, to see if Sticky Abort (SA) got set */
1950 retval = xscale_read_dcsr(target);
1951 if (retval != ERROR_OK)
1952 return retval;
1953 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1954 /* clear SA bit */
1955 retval = xscale_send_u32(target, 0x60);
1956 if (retval != ERROR_OK)
1957 return retval;
1959 LOG_ERROR("data abort writing memory");
1960 return ERROR_TARGET_DATA_ABORT;
1963 return ERROR_OK;
1966 static int xscale_write_phys_memory(struct target *target, target_addr_t address,
1967 uint32_t size, uint32_t count, const uint8_t *buffer)
1969 struct xscale_common *xscale = target_to_xscale(target);
1971 /* with MMU inactive, there are only physical addresses */
1972 if (!xscale->armv4_5_mmu.mmu_enabled)
1973 return xscale_write_memory(target, address, size, count, buffer);
1975 /** \todo: provide a non-stub implementation of this routine. */
1976 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1977 target_name(target), __func__);
1978 return ERROR_FAIL;
1981 static int xscale_get_ttb(struct target *target, uint32_t *result)
1983 struct xscale_common *xscale = target_to_xscale(target);
1984 uint32_t ttb;
1985 int retval;
1987 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1988 if (retval != ERROR_OK)
1989 return retval;
1990 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1992 *result = ttb;
1994 return ERROR_OK;
1997 static int xscale_disable_mmu_caches(struct target *target, int mmu,
1998 int d_u_cache, int i_cache)
2000 struct xscale_common *xscale = target_to_xscale(target);
2001 uint32_t cp15_control;
2002 int retval;
2004 /* read cp15 control register */
2005 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2006 if (retval != ERROR_OK)
2007 return retval;
2008 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
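/* cp15 control register bits touched below: bit 0 (M) enables the MMU,
 * bit 2 (C) the data/unified cache, bit 12 (I) the instruction cache */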
2010 if (mmu)
2011 cp15_control &= ~0x1U;
2013 if (d_u_cache) {
2014 /* clean DCache */
2015 retval = xscale_send_u32(target, 0x50);
2016 if (retval != ERROR_OK)
2017 return retval;
2018 retval = xscale_send_u32(target, xscale->cache_clean_address);
2019 if (retval != ERROR_OK)
2020 return retval;
2022 /* invalidate DCache */
2023 retval = xscale_send_u32(target, 0x51);
2024 if (retval != ERROR_OK)
2025 return retval;
2027 cp15_control &= ~0x4U;
2030 if (i_cache) {
2031 /* invalidate ICache */
2032 retval = xscale_send_u32(target, 0x52);
2033 if (retval != ERROR_OK)
2034 return retval;
2035 cp15_control &= ~0x1000U;
2038 /* write new cp15 control register */
2039 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2040 if (retval != ERROR_OK)
2041 return retval;
2043 /* execute cpwait to ensure outstanding operations complete */
2044 retval = xscale_send_u32(target, 0x53);
2045 return retval;
2048 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2049 int d_u_cache, int i_cache)
2051 struct xscale_common *xscale = target_to_xscale(target);
2052 uint32_t cp15_control;
2053 int retval;
2055 /* read cp15 control register */
2056 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2057 if (retval != ERROR_OK)
2058 return retval;
2059 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2061 if (mmu)
2062 cp15_control |= 0x1U;
2064 if (d_u_cache)
2065 cp15_control |= 0x4U;
2067 if (i_cache)
2068 cp15_control |= 0x1000U;
2070 /* write new cp15 control register */
2071 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2072 if (retval != ERROR_OK)
2073 return retval;
2075 /* execute cpwait to ensure outstanding operations complete */
2076 retval = xscale_send_u32(target, 0x53);
2077 return retval;
2080 static int xscale_set_breakpoint(struct target *target,
2081 struct breakpoint *breakpoint)
2083 int retval;
2084 struct xscale_common *xscale = target_to_xscale(target);
2086 if (target->state != TARGET_HALTED) {
2087 LOG_WARNING("target not halted");
2088 return ERROR_TARGET_NOT_HALTED;
2091 if (breakpoint->set) {
2092 LOG_WARNING("breakpoint already set");
2093 return ERROR_OK;
2096 if (breakpoint->type == BKPT_HARD) {
2097 uint32_t value = breakpoint->address | 1;
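/* IBCR0/IBCR1 take the word-aligned breakpoint address; bit 0 enables
 * the comparator */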
2098 if (!xscale->ibcr0_used) {
2099 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2100 xscale->ibcr0_used = 1;
2101 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2102 } else if (!xscale->ibcr1_used) {
2103 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2104 xscale->ibcr1_used = 1;
2105 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2106 } else {/* bug: availability previously verified in xscale_add_breakpoint() */
2107 LOG_ERROR("BUG: no hardware comparator available");
2108 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2110 } else if (breakpoint->type == BKPT_SOFT) {
2111 if (breakpoint->length == 4) {
2112 /* keep the original instruction in target endianness */
2113 retval = target_read_memory(target, breakpoint->address, 4, 1,
2114 breakpoint->orig_instr);
2115 if (retval != ERROR_OK)
2116 return retval;
2117 /* write the bkpt instruction in target endianness
2118 *(xscale->arm_bkpt is host endian) */
2119 retval = target_write_u32(target, breakpoint->address,
2120 xscale->arm_bkpt);
2121 if (retval != ERROR_OK)
2122 return retval;
2123 } else {
2124 /* keep the original instruction in target endianness */
2125 retval = target_read_memory(target, breakpoint->address, 2, 1,
2126 breakpoint->orig_instr);
2127 if (retval != ERROR_OK)
2128 return retval;
2129 /* write the bkpt instruction in target endianness
2130 *(xscale->thumb_bkpt is host endian) */
2131 retval = target_write_u16(target, breakpoint->address,
2132 xscale->thumb_bkpt);
2133 if (retval != ERROR_OK)
2134 return retval;
2136 breakpoint->set = 1;
2138 xscale_send_u32(target, 0x50); /* clean dcache */
2139 xscale_send_u32(target, xscale->cache_clean_address);
2140 xscale_send_u32(target, 0x51); /* invalidate dcache */
2141 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2144 return ERROR_OK;
2147 static int xscale_add_breakpoint(struct target *target,
2148 struct breakpoint *breakpoint)
2150 struct xscale_common *xscale = target_to_xscale(target);
2152 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2153 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2154 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2157 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2158 LOG_ERROR("only two-byte (Thumb) or four-byte (ARM) breakpoints are supported");
2159 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2162 if (breakpoint->type == BKPT_HARD)
2163 xscale->ibcr_available--;
2165 return xscale_set_breakpoint(target, breakpoint);
2168 static int xscale_unset_breakpoint(struct target *target,
2169 struct breakpoint *breakpoint)
2171 int retval;
2172 struct xscale_common *xscale = target_to_xscale(target);
2174 if (target->state != TARGET_HALTED) {
2175 LOG_WARNING("target not halted");
2176 return ERROR_TARGET_NOT_HALTED;
2179 if (!breakpoint->set) {
2180 LOG_WARNING("breakpoint not set");
2181 return ERROR_OK;
2184 if (breakpoint->type == BKPT_HARD) {
2185 if (breakpoint->set == 1) {
2186 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2187 xscale->ibcr0_used = 0;
2188 } else if (breakpoint->set == 2) {
2189 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2190 xscale->ibcr1_used = 0;
2192 breakpoint->set = 0;
2193 } else {
2194 /* restore original instruction (kept in target endianness) */
2195 if (breakpoint->length == 4) {
2196 retval = target_write_memory(target, breakpoint->address, 4, 1,
2197 breakpoint->orig_instr);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 } else {
2201 retval = target_write_memory(target, breakpoint->address, 2, 1,
2202 breakpoint->orig_instr);
2203 if (retval != ERROR_OK)
2204 return retval;
2206 breakpoint->set = 0;
2208 xscale_send_u32(target, 0x50); /* clean dcache */
2209 xscale_send_u32(target, xscale->cache_clean_address);
2210 xscale_send_u32(target, 0x51); /* invalidate dcache */
2211 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2214 return ERROR_OK;
2217 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2219 struct xscale_common *xscale = target_to_xscale(target);
2221 if (target->state != TARGET_HALTED) {
2222 LOG_ERROR("target not halted");
2223 return ERROR_TARGET_NOT_HALTED;
2226 if (breakpoint->set)
2227 xscale_unset_breakpoint(target, breakpoint);
2229 if (breakpoint->type == BKPT_HARD)
2230 xscale->ibcr_available++;
2232 return ERROR_OK;
2235 static int xscale_set_watchpoint(struct target *target,
2236 struct watchpoint *watchpoint)
2238 struct xscale_common *xscale = target_to_xscale(target);
2239 uint32_t enable = 0;
2240 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2241 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
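/* DBCON layout as used below: bits [1:0] enable/select the mode for DBR0,
 * bits [3:2] do the same for DBR1, and bit 8 (DBCON[M]) turns DBR1 into
 * an address mask for DBR0 */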
2243 if (target->state != TARGET_HALTED) {
2244 LOG_ERROR("target not halted");
2245 return ERROR_TARGET_NOT_HALTED;
2248 switch (watchpoint->rw) {
2249 case WPT_READ:
2250 enable = 0x3;
2251 break;
2252 case WPT_ACCESS:
2253 enable = 0x2;
2254 break;
2255 case WPT_WRITE:
2256 enable = 0x1;
2257 break;
2258 default:
2259 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2262 /* For a watchpoint spanning more than one word, both DBR registers must
2263 be used, with the second providing an address mask. */
2264 if (watchpoint->length > 4) {
2265 if (xscale->dbr0_used || xscale->dbr1_used) {
2266 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2267 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2270 /* Write mask value to DBR1, based on the length argument.
2271 * Address bits ignored by the comparator are those set in mask. */
2272 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2273 watchpoint->length - 1);
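/* example: length 8 yields a DBR1 mask of 0x7, so address bits [2:0]
 * are ignored and DBR0 matches an 8-byte region */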
2274 xscale->dbr1_used = 1;
2275 enable |= 0x100; /* DBCON[M] */
2278 if (!xscale->dbr0_used) {
2279 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2280 dbcon_value |= enable;
2281 xscale_set_reg_u32(dbcon, dbcon_value);
2282 watchpoint->set = 1;
2283 xscale->dbr0_used = 1;
2284 } else if (!xscale->dbr1_used) {
2285 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2286 dbcon_value |= enable << 2;
2287 xscale_set_reg_u32(dbcon, dbcon_value);
2288 watchpoint->set = 2;
2289 xscale->dbr1_used = 1;
2290 } else {
2291 LOG_ERROR("BUG: no hardware comparator available");
2292 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2295 return ERROR_OK;
2298 static int xscale_add_watchpoint(struct target *target,
2299 struct watchpoint *watchpoint)
2301 struct xscale_common *xscale = target_to_xscale(target);
2303 if (xscale->dbr_available < 1) {
2304 LOG_ERROR("no more watchpoint registers available");
2305 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2308 if (watchpoint->value)
2309 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2311 /* check that length is a non-zero power of two */
2312 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2313 if (len == 0 || len % 2) {
2314 LOG_ERROR("xscale requires the watchpoint length to be a non-zero power of two");
2315 return ERROR_COMMAND_ARGUMENT_INVALID;
2319 if (watchpoint->length == 4) { /* single word watchpoint */
2320 xscale->dbr_available--;/* one DBR reg used */
2321 return ERROR_OK;
2324 /* watchpoints across multiple words require both DBR registers */
2325 if (xscale->dbr_available < 2) {
2326 LOG_ERROR("insufficient watchpoint registers available");
2327 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2330 if (watchpoint->length > watchpoint->address) {
2331 LOG_ERROR("xscale does not support watchpoints with length "
2332 "greater than address");
2333 return ERROR_COMMAND_ARGUMENT_INVALID;
2336 xscale->dbr_available = 0;
2337 return ERROR_OK;
2340 static int xscale_unset_watchpoint(struct target *target,
2341 struct watchpoint *watchpoint)
2343 struct xscale_common *xscale = target_to_xscale(target);
2344 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2345 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2347 if (target->state != TARGET_HALTED) {
2348 LOG_WARNING("target not halted");
2349 return ERROR_TARGET_NOT_HALTED;
2352 if (!watchpoint->set) {
2353 LOG_WARNING("watchpoint not set");
2354 return ERROR_OK;
2357 if (watchpoint->set == 1) {
2358 if (watchpoint->length > 4) {
2359 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2360 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2361 } else
2362 dbcon_value &= ~0x3;
2364 xscale_set_reg_u32(dbcon, dbcon_value);
2365 xscale->dbr0_used = 0;
2366 } else if (watchpoint->set == 2) {
2367 dbcon_value &= ~0xc;
2368 xscale_set_reg_u32(dbcon, dbcon_value);
2369 xscale->dbr1_used = 0;
2371 watchpoint->set = 0;
2373 return ERROR_OK;
2376 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2378 struct xscale_common *xscale = target_to_xscale(target);
2380 if (target->state != TARGET_HALTED) {
2381 LOG_ERROR("target not halted");
2382 return ERROR_TARGET_NOT_HALTED;
2385 if (watchpoint->set)
2386 xscale_unset_watchpoint(target, watchpoint);
2388 if (watchpoint->length > 4)
2389 xscale->dbr_available++;/* both DBR regs now available */
2391 xscale->dbr_available++;
2393 return ERROR_OK;
2396 static int xscale_get_reg(struct reg *reg)
2398 struct xscale_reg *arch_info = reg->arch_info;
2399 struct target *target = arch_info->target;
2400 struct xscale_common *xscale = target_to_xscale(target);
2402 /* DCSR, TX and RX are accessible via JTAG */
2403 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2404 return xscale_read_dcsr(arch_info->target);
2405 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2406 /* 1 = consume register content */
2407 return xscale_read_tx(arch_info->target, 1);
2408 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2409 /* can't read from RX register (host -> debug handler) */
2410 return ERROR_OK;
2411 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2412 /* can't (explicitly) read from TXRXCTRL register */
2413 return ERROR_OK;
2414 } else {/* Other DBG registers have to be transferred by the debug handler;
2415 * send CP read request (command 0x40) */
2416 xscale_send_u32(target, 0x40);
2418 /* send CP register number */
2419 xscale_send_u32(target, arch_info->dbg_handler_number);
2421 /* read register value */
2422 xscale_read_tx(target, 1);
2423 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2425 reg->dirty = false;
2426 reg->valid = true;
2429 return ERROR_OK;
2432 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2434 struct xscale_reg *arch_info = reg->arch_info;
2435 struct target *target = arch_info->target;
2436 struct xscale_common *xscale = target_to_xscale(target);
2437 uint32_t value = buf_get_u32(buf, 0, 32);
2439 /* DCSR, TX and RX are accessible via JTAG */
2440 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2441 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2442 return xscale_write_dcsr(arch_info->target, -1, -1);
2443 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2444 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2445 return xscale_write_rx(arch_info->target);
2446 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2447 /* can't write to TX register (debug-handler -> host) */
2448 return ERROR_OK;
2449 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2450 /* can't (explicitly) write to TXRXCTRL register */
2451 return ERROR_OK;
2452 } else {/* Other DBG registers have to be transferred by the debug handler;
2453 * send CP write request (command 0x41) */
2454 xscale_send_u32(target, 0x41);
2456 /* send CP register number */
2457 xscale_send_u32(target, arch_info->dbg_handler_number);
2459 /* send CP register value */
2460 xscale_send_u32(target, value);
2461 buf_set_u32(reg->value, 0, 32, value);
2464 return ERROR_OK;
2467 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2469 struct xscale_common *xscale = target_to_xscale(target);
2470 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2471 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2473 /* send CP write request (command 0x41) */
2474 xscale_send_u32(target, 0x41);
2476 /* send CP register number */
2477 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2479 /* send CP register value */
2480 xscale_send_u32(target, value);
2481 buf_set_u32(dcsr->value, 0, 32, value);
2483 return ERROR_OK;
2486 static int xscale_read_trace(struct target *target)
2488 struct xscale_common *xscale = target_to_xscale(target);
2489 struct arm *arm = &xscale->arm;
2490 struct xscale_trace_data **trace_data_p;
2492 /* 258 words from debug handler
2493 * 256 trace buffer entries
2494 * 2 checkpoint addresses
2496 uint32_t trace_buffer[258];
2497 int is_address[256];
2498 int i, j;
2499 unsigned int num_checkpoints = 0;
2501 if (target->state != TARGET_HALTED) {
2502 LOG_WARNING("target must be stopped to read trace data");
2503 return ERROR_TARGET_NOT_HALTED;
2506 /* send read trace buffer command (command 0x61) */
2507 xscale_send_u32(target, 0x61);
2509 /* receive trace buffer content */
2510 xscale_receive(target, trace_buffer, 258);
2512 /* parse buffer backwards to identify address entries */
2513 for (i = 255; i >= 0; i--) {
2514 /* also count number of checkpointed entries */
2515 if ((trace_buffer[i] & 0xe0) == 0xc0)
2516 num_checkpoints++;
2518 is_address[i] = 0;
2519 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2520 ((trace_buffer[i] & 0xf0) == 0xd0)) {
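/* indirect (0x9n) and checkpointed indirect (0xDn) branch messages are
 * preceded by four bytes of branch target address; flag those entries
 * so they are not interpreted as message bytes */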
2521 if (i > 0)
2522 is_address[--i] = 1;
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 if (i > 0)
2528 is_address[--i] = 1;
2533 /* search first non-zero entry that is not part of an address */
2534 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2537 if (j == 256) {
2538 LOG_DEBUG("no trace data collected");
2539 return ERROR_XSCALE_NO_TRACE_DATA;
2542 /* account for possible partial address at buffer start (wrap mode only) */
2543 if (is_address[0]) { /* first entry is address; complete set of 4? */
2544 i = 1;
2545 while (i < 4)
2546 if (!is_address[i++])
2547 break;
2548 if (i < 4)
2549 j += i; /* partial address; can't use it */
2552 /* if first valid entry is indirect branch, can't use that either (no address) */
2553 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2554 j++;
2556 /* walk linked list to terminating entry */
2557 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2558 trace_data_p = &(*trace_data_p)->next)
2561 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2562 (*trace_data_p)->next = NULL;
2563 (*trace_data_p)->chkpt0 = trace_buffer[256];
2564 (*trace_data_p)->chkpt1 = trace_buffer[257];
2565 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2566 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2567 (*trace_data_p)->depth = 256 - j;
2568 (*trace_data_p)->num_checkpoints = num_checkpoints;
2570 for (i = j; i < 256; i++) {
2571 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2572 if (is_address[i])
2573 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2574 else
2575 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2578 return ERROR_OK;
2581 static int xscale_read_instruction(struct target *target, uint32_t pc,
2582 struct arm_instruction *instruction)
2584 struct xscale_common *const xscale = target_to_xscale(target);
2585 int section = -1;
2586 size_t size_read;
2587 uint32_t opcode;
2588 int retval;
2590 if (!xscale->trace.image)
2591 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2593 /* search for the section the current instruction belongs to */
2594 for (unsigned int i = 0; i < xscale->trace.image->num_sections; i++) {
2595 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2596 (xscale->trace.image->sections[i].base_address +
2597 xscale->trace.image->sections[i].size > pc)) {
2598 section = i;
2599 break;
2603 if (section == -1) {
2604 /* current instruction couldn't be found in the image */
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2608 if (xscale->trace.core_state == ARM_STATE_ARM) {
2609 uint8_t buf[4];
2610 retval = image_read_section(xscale->trace.image, section,
2611 pc - xscale->trace.image->sections[section].base_address,
2612 4, buf, &size_read);
2613 if (retval != ERROR_OK) {
2614 LOG_ERROR("error while reading instruction");
2615 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2617 opcode = target_buffer_get_u32(target, buf);
2618 arm_evaluate_opcode(opcode, pc, instruction);
2619 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2620 uint8_t buf[2];
2621 retval = image_read_section(xscale->trace.image, section,
2622 pc - xscale->trace.image->sections[section].base_address,
2623 2, buf, &size_read);
2624 if (retval != ERROR_OK) {
2625 LOG_ERROR("error while reading instruction");
2626 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2628 opcode = target_buffer_get_u16(target, buf);
2629 thumb_evaluate_opcode(opcode, pc, instruction);
2630 } else {
2631 LOG_ERROR("BUG: unknown core state encountered");
2632 exit(-1);
2635 return ERROR_OK;
2638 /* Extract address encoded into trace data.
2639 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2640 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2641 int i, uint32_t *target)
2643 /* if there are fewer than four entries prior to the indirect branch message
2644 * we can't extract the address */
2645 if (i < 4)
2646 *target = 0;
2647 else {
2648 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2649 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
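/* the four preceding entries hold the target address with the oldest
 * byte most significant, e.g. entries[i-4..i-1] of 0x12, 0x34, 0x56,
 * 0x78 reconstruct to 0x12345678 */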
2653 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2654 struct arm_instruction *instruction,
2655 struct command_invocation *cmd)
2657 int retval = xscale_read_instruction(target, pc, instruction);
2658 if (retval == ERROR_OK)
2659 command_print(cmd, "%s", instruction->text);
2660 else
2661 command_print(cmd, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2664 static int xscale_analyze_trace(struct target *target, struct command_invocation *cmd)
2666 struct xscale_common *xscale = target_to_xscale(target);
2667 struct xscale_trace_data *trace_data = xscale->trace.data;
2668 int i, retval;
2669 uint32_t breakpoint_pc = 0;
2670 struct arm_instruction instruction;
2671 uint32_t current_pc = 0;/* initialized when address determined */
2673 if (!xscale->trace.image)
2674 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2676 /* loop for each trace buffer that was loaded from target */
2677 while (trace_data) {
2678 int chkpt = 0; /* incremented as checkpointed entries found */
2679 int j;
2681 /* FIXME: set this to correct mode when trace buffer is first enabled */
2682 xscale->trace.core_state = ARM_STATE_ARM;
2684 /* loop for each entry in this trace buffer */
2685 for (i = 0; i < trace_data->depth; i++) {
2686 int exception = 0;
2687 uint32_t chkpt_reg = 0x0;
2688 uint32_t branch_target = 0;
2689 int count;
2691 /* trace entry type is upper nybble of 'message byte' */
2692 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
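/* upper nybble encodings handled below: 0..7 exception, 8 direct branch,
 * 9 indirect branch, 12/13 checkpointed direct/indirect branch,
 * 15 roll-over; the lower nybble is an instruction count */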
2694 /* Target addresses of indirect branches are written into buffer
2695 * before the message byte representing the branch. Skip past it */
2696 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2697 continue;
2699 switch (trace_msg_type) {
2700 case 0: /* Exceptions */
2701 case 1:
2702 case 2:
2703 case 3:
2704 case 4:
2705 case 5:
2706 case 6:
2707 case 7:
2708 exception = (trace_data->entries[i].data & 0x70) >> 4;
2710 /* FIXME: vector table may be at ffff0000 */
2711 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2712 break;
2714 case 8: /* Direct Branch */
2715 break;
2717 case 9: /* Indirect Branch */
2718 xscale_branch_address(trace_data, i, &branch_target);
2719 break;
2721 case 13: /* Checkpointed Indirect Branch */
2722 xscale_branch_address(trace_data, i, &branch_target);
2723 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2724 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2725 *oldest */
2726 else
2727 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2728 *newest */
2730 chkpt++;
2731 break;
2733 case 12: /* Checkpointed Direct Branch */
2734 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2735 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2736 *oldest */
2737 else
2738 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2739 *newest */
2741 /* if no current_pc, checkpoint will be starting point */
2742 if (current_pc == 0)
2743 branch_target = chkpt_reg;
2745 chkpt++;
2746 break;
2748 case 15:/* Roll-over */
2749 break;
2751 default:/* Reserved */
2752 LOG_WARNING("trace is suspect: invalid trace message byte");
2753 continue;
2757 /* If we don't have the current_pc yet, but we did get the branch target
2758 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2759 * then we can start displaying instructions at the next iteration, with
2760 * branch_target as the starting point.
2762 if (current_pc == 0) {
2763 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2764 continue;
2767 /* We have current_pc. Read and display the instructions from the image.
2768 * First, display count instructions (lower nybble of message byte). */
2769 count = trace_data->entries[i].data & 0x0f;
2770 for (j = 0; j < count; j++) {
2771 xscale_display_instruction(target, current_pc, &instruction,
2772 cmd);
2773 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2776 /* An additional instruction is implicitly added to count for
2777 * rollover and some exceptions: undef, swi, prefetch abort. */
2778 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2779 xscale_display_instruction(target, current_pc, &instruction,
2780 cmd);
2781 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2784 if (trace_msg_type == 15) /* rollover */
2785 continue;
2787 if (exception) {
2788 command_print(cmd, "--- exception %i ---", exception);
2789 continue;
2792 /* not exception or rollover; next instruction is a branch and is
2793 * not included in the count */
2794 xscale_display_instruction(target, current_pc, &instruction, cmd);
2796 /* for direct branches, extract branch destination from instruction */
2797 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2798 retval = xscale_read_instruction(target, current_pc, &instruction);
2799 if (retval == ERROR_OK)
2800 current_pc = instruction.info.b_bl_bx_blx.target_address;
2801 else
2802 current_pc = 0; /* branch destination unknown */
2804 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2805 if (trace_msg_type == 12) {
2806 if (current_pc == 0)
2807 current_pc = chkpt_reg;
2808 else if (current_pc != chkpt_reg) /* sanity check */
2809 LOG_WARNING("trace is suspect: checkpoint register "
2810 "inconsistent with address from image");
2813 if (current_pc == 0)
2814 command_print(cmd, "address unknown");
2816 continue;
2819 /* indirect branch; the branch destination was read from trace buffer */
2820 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2821 current_pc = branch_target;
2823 /* sanity check (checkpoint reg is redundant) */
2824 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2825 LOG_WARNING("trace is suspect: checkpoint register "
2826 "inconsistent with address from trace buffer");
2829 } /* END: for (i = 0; i < trace_data->depth; i++) */
2831 breakpoint_pc = trace_data->last_instruction; /* used below */
2832 trace_data = trace_data->next;
2834 } /* END: while (trace_data) */
2836 /* Finally... display all instructions up to the value of the pc when the
2837 * debug break occurred (saved when trace data was collected from target).
2838 * This is necessary because the trace only records execution branches and 16
2839 * consecutive instructions (rollovers), so the last few are typically missed.
2841 if (current_pc == 0)
2842 return ERROR_OK;/* current_pc was never found */
2844 /* how many instructions remaining? */
2845 int gap_count = (breakpoint_pc - current_pc) /
2846 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2848 /* should never be negative or over 16, but verify */
2849 if (gap_count < 0 || gap_count > 16) {
2850 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2851 return ERROR_OK;/* bail; large number or negative value no good */
2854 /* display remaining instructions */
2855 for (i = 0; i < gap_count; i++) {
2856 xscale_display_instruction(target, current_pc, &instruction, cmd);
2857 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2860 return ERROR_OK;
2863 static const struct reg_arch_type xscale_reg_type = {
2864 .get = xscale_get_reg,
2865 .set = xscale_set_reg,
2868 static void xscale_build_reg_cache(struct target *target)
2870 struct xscale_common *xscale = target_to_xscale(target);
2871 struct arm *arm = &xscale->arm;
2872 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2873 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2874 int i;
2875 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
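/* two register caches are chained: the standard ARM core registers built
 * by arm_build_reg_cache(), followed by the XScale debug registers named
 * in xscale_reg_list[] */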
2877 (*cache_p) = arm_build_reg_cache(target, arm);
2879 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2880 cache_p = &(*cache_p)->next;
2882 /* fill in values for the xscale reg cache */
2883 (*cache_p)->name = "XScale registers";
2884 (*cache_p)->next = NULL;
2885 (*cache_p)->reg_list = calloc(num_regs, sizeof(struct reg));
2886 (*cache_p)->num_regs = num_regs;
2888 for (i = 0; i < num_regs; i++) {
2889 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2890 (*cache_p)->reg_list[i].value = calloc(4, 1);
2891 (*cache_p)->reg_list[i].dirty = false;
2892 (*cache_p)->reg_list[i].valid = false;
2893 (*cache_p)->reg_list[i].size = 32;
2894 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2895 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2896 (*cache_p)->reg_list[i].exist = true;
2897 arch_info[i] = xscale_reg_arch_info[i];
2898 arch_info[i].target = target;
2901 xscale->reg_cache = (*cache_p);
2904 static void xscale_free_reg_cache(struct target *target)
2906 struct xscale_common *xscale = target_to_xscale(target);
2907 struct reg_cache *cache = xscale->reg_cache;
2909 for (unsigned int i = 0; i < ARRAY_SIZE(xscale_reg_arch_info); i++)
2910 free(cache->reg_list[i].value);
2912 free(cache->reg_list[0].arch_info);
2913 free(cache->reg_list);
2914 free(cache);
2916 arm_free_reg_cache(&xscale->arm);
2919 static int xscale_init_target(struct command_context *cmd_ctx,
2920 struct target *target)
2922 xscale_build_reg_cache(target);
2923 return ERROR_OK;
2926 static void xscale_deinit_target(struct target *target)
2928 struct xscale_common *xscale = target_to_xscale(target);
2930 xscale_free_reg_cache(target);
2931 free(xscale);
2934 static int xscale_init_arch_info(struct target *target,
2935 struct xscale_common *xscale, struct jtag_tap *tap)
2937 struct arm *arm;
2938 uint32_t high_reset_branch, low_reset_branch;
2939 int i;
2941 arm = &xscale->arm;
2943 /* store architecture specific data */
2944 xscale->common_magic = XSCALE_COMMON_MAGIC;
2946 /* PXA3xx with 11 bit IR shifts the JTAG instructions */
2947 if (tap->ir_length == 11)
2948 xscale->xscale_variant = XSCALE_PXA3XX;
2949 else
2950 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2952 /* the debug handler isn't installed (and thus not running) at this time */
2953 xscale->handler_address = 0xfe000800;
2955 /* clear the vectors we keep locally for reference */
2956 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2957 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2959 /* no user-specified vectors have been configured yet */
2960 xscale->static_low_vectors_set = 0x0;
2961 xscale->static_high_vectors_set = 0x0;
2963 /* calculate branches to debug handler */
2964 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2965 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
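/* each vector is an ARM 'B' instruction whose 24-bit offset is
 * (destination - vector address - 8) / 4, the 8 accounting for the ARM
 * pipeline; both reset vectors branch to handler_address + 0x20 */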
2967 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2968 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2970 for (i = 1; i <= 7; i++) {
2971 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2972 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2975 /* 64kB aligned region used for DCache cleaning */
2976 xscale->cache_clean_address = 0xfffe0000;
2978 xscale->hold_rst = 0;
2979 xscale->external_debug_break = 0;
2981 xscale->ibcr_available = 2;
2982 xscale->ibcr0_used = 0;
2983 xscale->ibcr1_used = 0;
2985 xscale->dbr_available = 2;
2986 xscale->dbr0_used = 0;
2987 xscale->dbr1_used = 0;
2989 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2990 target_name(target));
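/* BKPT opcodes planted for software breakpoints, in ARM and Thumb
 * encodings (see xscale_set_breakpoint()) */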
2992 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2993 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2995 xscale->vector_catch = 0x1;
2997 xscale->trace.data = NULL;
2998 xscale->trace.image = NULL;
2999 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3000 xscale->trace.buffer_fill = 0;
3001 xscale->trace.fill_counter = 0;
3003 /* prepare ARMv4/5 specific information */
3004 arm->arch_info = xscale;
3005 arm->core_type = ARM_CORE_TYPE_STD;
3006 arm->read_core_reg = xscale_read_core_reg;
3007 arm->write_core_reg = xscale_write_core_reg;
3008 arm->full_context = xscale_full_context;
3010 arm_init_arch_info(target, arm);
3012 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3013 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3014 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3015 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3016 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3017 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3018 xscale->armv4_5_mmu.has_tiny_pages = 1;
3019 xscale->armv4_5_mmu.mmu_enabled = 0;
3021 return ERROR_OK;
3024 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3026 struct xscale_common *xscale;
3028 if (sizeof(xscale_debug_handler) > 0x800) {
3029 LOG_ERROR("debug_handler.bin: larger than 2KB");
3030 return ERROR_FAIL;
3033 xscale = calloc(1, sizeof(*xscale));
3034 if (!xscale)
3035 return ERROR_FAIL;
3037 return xscale_init_arch_info(target, xscale, target->tap);
3040 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3042 struct target *target = NULL;
3043 struct xscale_common *xscale;
3044 int retval;
3045 uint32_t handler_address;
3047 if (CMD_ARGC < 2)
3048 return ERROR_COMMAND_SYNTAX_ERROR;
3050 target = get_target(CMD_ARGV[0]);
3051 if (!target) {
3052 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3053 return ERROR_FAIL;
3056 xscale = target_to_xscale(target);
3057 retval = xscale_verify_pointer(CMD, xscale);
3058 if (retval != ERROR_OK)
3059 return retval;
3061 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3063 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3064 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3065 xscale->handler_address = handler_address;
3066 else {
3067 LOG_ERROR(
3068 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3069 return ERROR_FAIL;
3072 return ERROR_OK;
3075 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3077 struct target *target = NULL;
3078 struct xscale_common *xscale;
3079 int retval;
3080 uint32_t cache_clean_address;
3082 if (CMD_ARGC < 2)
3083 return ERROR_COMMAND_SYNTAX_ERROR;
3085 target = get_target(CMD_ARGV[0]);
3086 if (!target) {
3087 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3088 return ERROR_FAIL;
3090 xscale = target_to_xscale(target);
3091 retval = xscale_verify_pointer(CMD, xscale);
3092 if (retval != ERROR_OK)
3093 return retval;
3095 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3097 if (cache_clean_address & 0xffff)
3098 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3099 else
3100 xscale->cache_clean_address = cache_clean_address;
3102 return ERROR_OK;
3105 COMMAND_HANDLER(xscale_handle_cache_info_command)
3107 struct target *target = get_current_target(CMD_CTX);
3108 struct xscale_common *xscale = target_to_xscale(target);
3109 int retval;
3111 retval = xscale_verify_pointer(CMD, xscale);
3112 if (retval != ERROR_OK)
3113 return retval;
3115 return armv4_5_handle_cache_info_command(CMD, &xscale->armv4_5_mmu.armv4_5_cache);
3118 static int xscale_virt2phys(struct target *target,
3119 target_addr_t virtual, target_addr_t *physical)
3121 struct xscale_common *xscale = target_to_xscale(target);
3122 uint32_t cb;
3124 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3125 LOG_ERROR(xscale_not);
3126 return ERROR_TARGET_INVALID;
3129 uint32_t ret;
3130 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3131 virtual, &cb, &ret);
3132 if (retval != ERROR_OK)
3133 return retval;
3134 *physical = ret;
3135 return ERROR_OK;
3138 static int xscale_mmu(struct target *target, int *enabled)
3140 struct xscale_common *xscale = target_to_xscale(target);
3142 if (target->state != TARGET_HALTED) {
3143 LOG_ERROR("Target not halted");
3144 return ERROR_TARGET_INVALID;
3146 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3147 return ERROR_OK;
3150 COMMAND_HANDLER(xscale_handle_mmu_command)
3152 struct target *target = get_current_target(CMD_CTX);
3153 struct xscale_common *xscale = target_to_xscale(target);
3154 int retval;
3156 retval = xscale_verify_pointer(CMD, xscale);
3157 if (retval != ERROR_OK)
3158 return retval;
3160 if (target->state != TARGET_HALTED) {
3161 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3162 return ERROR_OK;
3165 if (CMD_ARGC >= 1) {
3166 bool enable;
3167 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3168 if (enable)
3169 xscale_enable_mmu_caches(target, 1, 0, 0);
3170 else
3171 xscale_disable_mmu_caches(target, 1, 0, 0);
3172 xscale->armv4_5_mmu.mmu_enabled = enable;
3175 command_print(CMD, "mmu %s",
3176 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3178 return ERROR_OK;
3181 COMMAND_HANDLER(xscale_handle_idcache_command)
3183 struct target *target = get_current_target(CMD_CTX);
3184 struct xscale_common *xscale = target_to_xscale(target);
3186 int retval = xscale_verify_pointer(CMD, xscale);
3187 if (retval != ERROR_OK)
3188 return retval;
3190 if (target->state != TARGET_HALTED) {
3191 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3192 return ERROR_OK;
3195 bool icache = false;
3196 if (strcmp(CMD_NAME, "icache") == 0)
3197 icache = true;
3198 if (CMD_ARGC >= 1) {
3199 bool enable;
3200 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3201 if (icache) {
3202 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3203 if (enable)
3204 xscale_enable_mmu_caches(target, 0, 0, 1);
3205 else
3206 xscale_disable_mmu_caches(target, 0, 0, 1);
3207 } else {
3208 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3209 if (enable)
3210 xscale_enable_mmu_caches(target, 0, 1, 0);
3211 else
3212 xscale_disable_mmu_caches(target, 0, 1, 0);
3216 bool enabled = icache ?
3217 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3218 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3219 const char *msg = enabled ? "enabled" : "disabled";
3220 command_print(CMD, "%s %s", CMD_NAME, msg);
3222 return ERROR_OK;
3225 static const struct {
3226 char name[15];
3227 unsigned mask;
3228 } vec_ids[] = {
3229 { "fiq", DCSR_TF, },
3230 { "irq", DCSR_TI, },
3231 { "dabt", DCSR_TD, },
3232 { "pabt", DCSR_TA, },
3233 { "swi", DCSR_TS, },
3234 { "undef", DCSR_TU, },
3235 { "reset", DCSR_TR, },
3238 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3240 struct target *target = get_current_target(CMD_CTX);
3241 struct xscale_common *xscale = target_to_xscale(target);
3242 int retval;
3243 uint32_t dcsr_value;
3244 uint32_t catch = 0;
3245 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3247 retval = xscale_verify_pointer(CMD, xscale);
3248 if (retval != ERROR_OK)
3249 return retval;
3251 if (CMD_ARGC > 0) {
3252 if (CMD_ARGC == 1) {
3253 if (strcmp(CMD_ARGV[0], "all") == 0) {
3254 catch = DCSR_TRAP_MASK;
3255 CMD_ARGC--;
3256 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3257 catch = 0;
3258 CMD_ARGC--;
3261 while (CMD_ARGC-- > 0) {
3262 unsigned i;
3263 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3264 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3265 continue;
3266 catch |= vec_ids[i].mask;
3267 break;
3269 if (i == ARRAY_SIZE(vec_ids)) {
3270 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3271 return ERROR_COMMAND_SYNTAX_ERROR;
3274 buf_set_u32(dcsr_reg->value, 0, 32,
3275 (buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
3276 xscale_write_dcsr(target, -1, -1);
3279 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3280 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3281 command_print(CMD, "%15s: %s", vec_ids[i].name,
3282 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3285 return ERROR_OK;
3289 COMMAND_HANDLER(xscale_handle_vector_table_command)
3291 struct target *target = get_current_target(CMD_CTX);
3292 struct xscale_common *xscale = target_to_xscale(target);
3293 int err = 0;
3294 int retval;
3296 retval = xscale_verify_pointer(CMD, xscale);
3297 if (retval != ERROR_OK)
3298 return retval;
3300 if (CMD_ARGC == 0) { /* print current settings */
3301 int idx;
3303 command_print(CMD, "active user-set static vectors:");
3304 for (idx = 1; idx < 8; idx++)
3305 if (xscale->static_low_vectors_set & (1 << idx))
3306 command_print(CMD,
3307 "low %d: 0x%" PRIx32,
3308 idx,
3309 xscale->static_low_vectors[idx]);
3310 for (idx = 1; idx < 8; idx++)
3311 if (xscale->static_high_vectors_set & (1 << idx))
3312 command_print(CMD,
3313 "high %d: 0x%" PRIx32,
3314 idx,
3315 xscale->static_high_vectors[idx]);
3316 return ERROR_OK;
3319 if (CMD_ARGC != 3)
3320 err = 1;
3321 else {
3322 int idx;
3323 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3324 uint32_t vec;
3325 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3327 if (idx < 1 || idx >= 8)
3328 err = 1;
3330 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3331 xscale->static_low_vectors_set |= (1<<idx);
3332 xscale->static_low_vectors[idx] = vec;
3333 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3334 xscale->static_high_vectors_set |= (1<<idx);
3335 xscale->static_high_vectors[idx] = vec;
3336 } else
3337 err = 1;
3340 if (err)
3341 return ERROR_COMMAND_SYNTAX_ERROR;
3343 return ERROR_OK;
3347 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3349 struct target *target = get_current_target(CMD_CTX);
3350 struct xscale_common *xscale = target_to_xscale(target);
3351 uint32_t dcsr_value;
3352 int retval;
3354 retval = xscale_verify_pointer(CMD, xscale);
3355 if (retval != ERROR_OK)
3356 return retval;
3358 if (target->state != TARGET_HALTED) {
3359 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3360 return ERROR_OK;
3363 if (CMD_ARGC >= 1) {
3364 if (strcmp("enable", CMD_ARGV[0]) == 0)
3365 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3366 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3367 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3368 else
3369 return ERROR_COMMAND_SYNTAX_ERROR;
3372 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3373 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3374 int buffcount = 1; /* default */
3375 if (CMD_ARGC >= 3)
3376 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3377 if (buffcount < 1) { /* invalid */
3378 command_print(CMD, "fill buffer count must be > 0");
3379 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3380 return ERROR_COMMAND_SYNTAX_ERROR;
3382 xscale->trace.buffer_fill = buffcount;
3383 xscale->trace.mode = XSCALE_TRACE_FILL;
3384 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3385 xscale->trace.mode = XSCALE_TRACE_WRAP;
3386 else {
3387 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3388 return ERROR_COMMAND_SYNTAX_ERROR;
3392 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3393 char fill_string[16];
3394 snprintf(fill_string, sizeof(fill_string), "fill %d", xscale->trace.buffer_fill);
3395 command_print(CMD, "trace buffer enabled (%s)",
3396 (xscale->trace.mode == XSCALE_TRACE_FILL)
3397 ? fill_string : "wrap");
3398 } else
3399 command_print(CMD, "trace buffer disabled");
3401 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3402 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3403 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3404 else
3405 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3407 return ERROR_OK;
3410 COMMAND_HANDLER(xscale_handle_trace_image_command)
3412 struct target *target = get_current_target(CMD_CTX);
3413 struct xscale_common *xscale = target_to_xscale(target);
3414 int retval;
3416 if (CMD_ARGC < 1)
3417 return ERROR_COMMAND_SYNTAX_ERROR;
3419 retval = xscale_verify_pointer(CMD, xscale);
3420 if (retval != ERROR_OK)
3421 return retval;
3423 if (xscale->trace.image) {
3424 image_close(xscale->trace.image);
3425 free(xscale->trace.image);
3426 command_print(CMD, "previously loaded image found and closed");
3429 xscale->trace.image = malloc(sizeof(struct image));
3430 xscale->trace.image->base_address_set = false;
3431 xscale->trace.image->start_address_set = false;
3433 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3434 if (CMD_ARGC >= 2) {
3435 xscale->trace.image->base_address_set = true;
3436 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3437 } else
3438 xscale->trace.image->base_address_set = false;
3440 if (image_open(xscale->trace.image, CMD_ARGV[0],
3441 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3442 free(xscale->trace.image);
3443 xscale->trace.image = NULL;
3444 return ERROR_OK;
3447 return ERROR_OK;
3450 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3452 struct target *target = get_current_target(CMD_CTX);
3453 struct xscale_common *xscale = target_to_xscale(target);
3454 struct xscale_trace_data *trace_data;
3455 struct fileio *file;
3456 int retval;
3458 retval = xscale_verify_pointer(CMD, xscale);
3459 if (retval != ERROR_OK)
3460 return retval;
3462 if (target->state != TARGET_HALTED) {
3463 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3464 return ERROR_OK;
3467 if (CMD_ARGC < 1)
3468 return ERROR_COMMAND_SYNTAX_ERROR;
3470 trace_data = xscale->trace.data;
3472 if (!trace_data) {
3473 command_print(CMD, "no trace data collected");
3474 return ERROR_OK;
3477 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3478 return ERROR_OK;
3480 while (trace_data) {
3481 int i;
3483 fileio_write_u32(file, trace_data->chkpt0);
3484 fileio_write_u32(file, trace_data->chkpt1);
3485 fileio_write_u32(file, trace_data->last_instruction);
3486 fileio_write_u32(file, trace_data->depth);
3488 for (i = 0; i < trace_data->depth; i++)
3489 fileio_write_u32(file, trace_data->entries[i].data |
3490 ((trace_data->entries[i].type & 0xffff) << 16));
3492 trace_data = trace_data->next;
3495 fileio_close(file);
3497 return ERROR_OK;
3500 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3502 struct target *target = get_current_target(CMD_CTX);
3503 struct xscale_common *xscale = target_to_xscale(target);
3504 int retval;
3506 retval = xscale_verify_pointer(CMD, xscale);
3507 if (retval != ERROR_OK)
3508 return retval;
3510 xscale_analyze_trace(target, CMD);
3512 return ERROR_OK;
3515 COMMAND_HANDLER(xscale_handle_cp15)
3517 struct target *target = get_current_target(CMD_CTX);
3518 struct xscale_common *xscale = target_to_xscale(target);
3519 int retval;
3521 retval = xscale_verify_pointer(CMD, xscale);
3522 if (retval != ERROR_OK)
3523 return retval;
3525 if (target->state != TARGET_HALTED) {
3526 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3527 return ERROR_OK;
3529 uint32_t reg_no = 0;
3530 struct reg *reg = NULL;
3531 if (CMD_ARGC > 0) {
3532 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3533 /*translate from xscale cp15 register no to openocd register*/
3534 switch (reg_no) {
3535 case 0:
3536 reg_no = XSCALE_MAINID;
3537 break;
3538 case 1:
3539 reg_no = XSCALE_CTRL;
3540 break;
3541 case 2:
3542 reg_no = XSCALE_TTB;
3543 break;
3544 case 3:
3545 reg_no = XSCALE_DAC;
3546 break;
3547 case 5:
3548 reg_no = XSCALE_FSR;
3549 break;
3550 case 6:
3551 reg_no = XSCALE_FAR;
3552 break;
3553 case 13:
3554 reg_no = XSCALE_PID;
3555 break;
3556 case 15:
3557 reg_no = XSCALE_CPACCESS;
3558 break;
3559 default:
3560 command_print(CMD, "invalid register number");
3561 return ERROR_COMMAND_SYNTAX_ERROR;
3563 reg = &xscale->reg_cache->reg_list[reg_no];
3566 if (CMD_ARGC == 1) {
3567 uint32_t value;
3569 /* read cp15 control register */
3570 xscale_get_reg(reg);
3571 value = buf_get_u32(reg->value, 0, 32);
3572 command_print(CMD, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3573 value);
3574 } else if (CMD_ARGC == 2) {
3575 uint32_t value;
3576 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3578 /* send CP write request (command 0x41) */
3579 xscale_send_u32(target, 0x41);
3581 /* send CP register number */
3582 xscale_send_u32(target, reg_no);
3584 /* send CP register value */
3585 xscale_send_u32(target, value);
3587 /* execute cpwait to ensure outstanding operations complete */
3588 xscale_send_u32(target, 0x53);
3589 } else
3590 return ERROR_COMMAND_SYNTAX_ERROR;
3592 return ERROR_OK;
3595 static const struct command_registration xscale_exec_command_handlers[] = {
3597 .name = "cache_info",
3598 .handler = xscale_handle_cache_info_command,
3599 .mode = COMMAND_EXEC,
3600 .help = "display information about CPU caches",
3601 .usage = "",
3604 .name = "mmu",
3605 .handler = xscale_handle_mmu_command,
3606 .mode = COMMAND_EXEC,
3607 .help = "enable or disable the MMU",
3608 .usage = "['enable'|'disable']",
3611 .name = "icache",
3612 .handler = xscale_handle_idcache_command,
3613 .mode = COMMAND_EXEC,
3614 .help = "display ICache state, optionally enabling or "
3615 "disabling it",
3616 .usage = "['enable'|'disable']",
3619 .name = "dcache",
3620 .handler = xscale_handle_idcache_command,
3621 .mode = COMMAND_EXEC,
3622 .help = "display DCache state, optionally enabling or "
3623 "disabling it",
3624 .usage = "['enable'|'disable']",
3627 .name = "vector_catch",
3628 .handler = xscale_handle_vector_catch_command,
3629 .mode = COMMAND_EXEC,
3630 .help = "set or display mask of vectors "
3631 "that should trigger debug entry",
3632 .usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
3635 .name = "vector_table",
3636 .handler = xscale_handle_vector_table_command,
3637 .mode = COMMAND_EXEC,
3638 .help = "set vector table entry in mini-ICache, "
3639 "or display current tables",
3640 .usage = "[('high'|'low') index code]",
3643 .name = "trace_buffer",
3644 .handler = xscale_handle_trace_buffer_command,
3645 .mode = COMMAND_EXEC,
3646 .help = "display trace buffer status, enable or disable "
3647 "tracing, and optionally reconfigure trace mode",
3648 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3651 .name = "dump_trace",
3652 .handler = xscale_handle_dump_trace_command,
3653 .mode = COMMAND_EXEC,
3654 .help = "dump content of trace buffer to file",
3655 .usage = "filename",
3658 .name = "analyze_trace",
3659 .handler = xscale_handle_analyze_trace_buffer_command,
3660 .mode = COMMAND_EXEC,
3661 .help = "analyze content of trace buffer",
3662 .usage = "",
3665 .name = "trace_image",
3666 .handler = xscale_handle_trace_image_command,
3667 .mode = COMMAND_EXEC,
3668 .help = "load image from file to address (default 0)",
3669 .usage = "filename [offset [filetype]]",
3672 .name = "cp15",
3673 .handler = xscale_handle_cp15,
3674 .mode = COMMAND_EXEC,
3675 .help = "Read or write coprocessor 15 register.",
3676 .usage = "register [value]",
3678 COMMAND_REGISTRATION_DONE
3680 static const struct command_registration xscale_any_command_handlers[] = {
3682 .name = "debug_handler",
3683 .handler = xscale_handle_debug_handler_command,
3684 .mode = COMMAND_ANY,
3685 .help = "Change address used for debug handler.",
3686 .usage = "<target> <address>",
3689 .name = "cache_clean_address",
3690 .handler = xscale_handle_cache_clean_address_command,
3691 .mode = COMMAND_ANY,
3692 .help = "Change address used for cleaning data cache.",
3693 .usage = "address",
3696 .chain = xscale_exec_command_handlers,
3698 COMMAND_REGISTRATION_DONE
3700 static const struct command_registration xscale_command_handlers[] = {
3702 .chain = arm_command_handlers,
3705 .name = "xscale",
3706 .mode = COMMAND_ANY,
3707 .help = "xscale command group",
3708 .usage = "",
3709 .chain = xscale_any_command_handlers,
3711 COMMAND_REGISTRATION_DONE
3714 struct target_type xscale_target = {
3715 .name = "xscale",
3717 .poll = xscale_poll,
3718 .arch_state = xscale_arch_state,
3720 .halt = xscale_halt,
3721 .resume = xscale_resume,
3722 .step = xscale_step,
3724 .assert_reset = xscale_assert_reset,
3725 .deassert_reset = xscale_deassert_reset,
3727 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3728 .get_gdb_arch = arm_get_gdb_arch,
3729 .get_gdb_reg_list = arm_get_gdb_reg_list,
3731 .read_memory = xscale_read_memory,
3732 .read_phys_memory = xscale_read_phys_memory,
3733 .write_memory = xscale_write_memory,
3734 .write_phys_memory = xscale_write_phys_memory,
3736 .checksum_memory = arm_checksum_memory,
3737 .blank_check_memory = arm_blank_check_memory,
3739 .run_algorithm = armv4_5_run_algorithm,
3741 .add_breakpoint = xscale_add_breakpoint,
3742 .remove_breakpoint = xscale_remove_breakpoint,
3743 .add_watchpoint = xscale_add_watchpoint,
3744 .remove_watchpoint = xscale_remove_watchpoint,
3746 .commands = xscale_command_handlers,
3747 .target_create = xscale_target_create,
3748 .init_target = xscale_init_target,
3749 .deinit_target = xscale_deinit_target,
3751 .virt2phys = xscale_virt2phys,
3752 .mmu = xscale_mmu