arm mmu: error propagation added for address translation
src/target/xscale.c (openocd/cmsis-dap.git)
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
87 static char *const xscale_reg_list[] =
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
113 static const struct xscale_reg xscale_reg_arch_info[] =
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
142 uint8_t buf[4];
144 buf_set_u32(buf, 0, 32, value);
146 return xscale_set_reg(reg, buf);
149 static const char xscale_not[] = "target is not an XScale";
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
158 return ERROR_OK;
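/* Queue an IR scan that loads a new JTAG instruction into the XScale TAP,
 * skipping the scan if that instruction is already the current one. */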
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
163 if (tap == NULL)
164 return ERROR_FAIL;
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
168 struct scan_field field;
169 uint8_t scratch[4];
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(scratch, 0, field.num_bits, new_instr);
176 jtag_add_ir_scan(tap, &field, end_state);
179 return ERROR_OK;
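/* Read the Debug Control and Status Register (DCSR) through the SELDCSR
 * JTAG data register into the register cache, then write the same value
 * back together with the cached hold_rst/external_debug_break bits. */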
182 static int xscale_read_dcsr(struct target *target)
184 struct xscale_common *xscale = target_to_xscale(target);
185 int retval;
186 struct scan_field fields[3];
187 uint8_t field0 = 0x0;
188 uint8_t field0_check_value = 0x2;
189 uint8_t field0_check_mask = 0x7;
190 uint8_t field2 = 0x0;
191 uint8_t field2_check_value = 0x0;
192 uint8_t field2_check_mask = 0x1;
194 xscale_jtag_set_instr(target->tap,
195 XSCALE_SELDCSR << xscale->xscale_variant,
196 TAP_DRPAUSE);
198 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
199 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
201 memset(&fields, 0, sizeof fields);
203 fields[0].num_bits = 3;
204 fields[0].out_value = &field0;
205 uint8_t tmp;
206 fields[0].in_value = &tmp;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211 fields[2].num_bits = 1;
212 fields[2].out_value = &field2;
213 uint8_t tmp2;
214 fields[2].in_value = &tmp2;
216 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
218 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
219 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 LOG_ERROR("JTAG error while reading DCSR");
224 return retval;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
228 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230 /* write the register with the value we just read
231 * on this second pass, only the first bit of field0 is guaranteed to be 0
233 field0_check_mask = 0x1;
234 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
235 fields[1].in_value = NULL;
237 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
239 /* DANGER!!! this must be here. It will make sure that the arguments
240 * to jtag_set_check_value() do not go out of scope! */
241 return jtag_execute_queue();
245 static void xscale_getbuf(jtag_callback_data_t arg)
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
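/* Collect num_words 32-bit words from the debug handler via the DBGTX
 * data register, re-reading any word whose TX-valid status bit was clear. */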
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
274 memset(&fields, 0, sizeof fields);
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
280 fields[1].num_bits = 32;
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
299 fields[0].in_value = &field0[i];
301 jtag_add_pathmove(3, path);
303 fields[1].in_value = (uint8_t *)(field1 + i);
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
309 words_scheduled++;
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
321 if (!(field0[i] & 1))
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
330 words_scheduled--;
333 if (words_scheduled == 0)
335 if (attempts++ == 1000)
337 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
343 words_done += words_scheduled;
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
349 free(field1);
free(field0); /* field0 is heap-allocated above as well; release it to avoid a leak */
351 return retval;
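/* Poll the TX register until it holds valid data or a one second timeout
 * expires; "consume" selects whether the read clears TX_READY. */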
354 static int xscale_read_tx(struct target *target, int consume)
356 struct xscale_common *xscale = target_to_xscale(target);
357 tap_state_t path[3];
358 tap_state_t noconsume_path[6];
359 int retval;
360 struct timeval timeout, now;
361 struct scan_field fields[3];
362 uint8_t field0_in = 0x0;
363 uint8_t field0_check_value = 0x2;
364 uint8_t field0_check_mask = 0x6;
365 uint8_t field2_check_value = 0x0;
366 uint8_t field2_check_mask = 0x1;
368 xscale_jtag_set_instr(target->tap,
369 XSCALE_DBGTX << xscale->xscale_variant,
370 TAP_IDLE);
372 path[0] = TAP_DRSELECT;
373 path[1] = TAP_DRCAPTURE;
374 path[2] = TAP_DRSHIFT;
376 noconsume_path[0] = TAP_DRSELECT;
377 noconsume_path[1] = TAP_DRCAPTURE;
378 noconsume_path[2] = TAP_DREXIT1;
379 noconsume_path[3] = TAP_DRPAUSE;
380 noconsume_path[4] = TAP_DREXIT2;
381 noconsume_path[5] = TAP_DRSHIFT;
383 memset(&fields, 0, sizeof fields);
385 fields[0].num_bits = 3;
386 fields[0].in_value = &field0_in;
388 fields[1].num_bits = 32;
389 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
391 fields[2].num_bits = 1;
392 uint8_t tmp;
393 fields[2].in_value = &tmp;
395 gettimeofday(&timeout, NULL);
396 timeval_add_time(&timeout, 1, 0);
398 for (;;)
400 /* if we want to consume the register content (i.e. clear TX_READY),
401 * we have to go straight from Capture-DR to Shift-DR
402 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
404 if (consume)
405 jtag_add_pathmove(3, path);
406 else
408 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
411 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
413 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
414 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
416 if ((retval = jtag_execute_queue()) != ERROR_OK)
418 LOG_ERROR("JTAG error while reading TX");
419 return ERROR_TARGET_TIMEOUT;
422 gettimeofday(&now, NULL);
423 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
425 LOG_ERROR("timed out reading TX register");
426 return ERROR_TARGET_TIMEOUT;
428 if (!((!(field0_in & 1)) && consume))
430 goto done;
432 if (debug_level >= 3)
434 LOG_DEBUG("waiting 100ms");
435 alive_sleep(100); /* avoid flooding the logs */
436 } else
438 keep_alive();
441 done:
443 if (!(field0_in & 1))
444 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
446 return ERROR_OK;
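/* Poll until the debug handler has consumed the previous RX word, then
 * scan the cached RX value out again with rx_valid set. */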
449 static int xscale_write_rx(struct target *target)
451 struct xscale_common *xscale = target_to_xscale(target);
452 int retval;
453 struct timeval timeout, now;
454 struct scan_field fields[3];
455 uint8_t field0_out = 0x0;
456 uint8_t field0_in = 0x0;
457 uint8_t field0_check_value = 0x2;
458 uint8_t field0_check_mask = 0x6;
459 uint8_t field2 = 0x0;
460 uint8_t field2_check_value = 0x0;
461 uint8_t field2_check_mask = 0x1;
463 xscale_jtag_set_instr(target->tap,
464 XSCALE_DBGRX << xscale->xscale_variant,
465 TAP_IDLE);
467 memset(&fields, 0, sizeof fields);
469 fields[0].num_bits = 3;
470 fields[0].out_value = &field0_out;
471 fields[0].in_value = &field0_in;
473 fields[1].num_bits = 32;
474 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
476 fields[2].num_bits = 1;
477 fields[2].out_value = &field2;
478 uint8_t tmp;
479 fields[2].in_value = &tmp;
481 gettimeofday(&timeout, NULL);
482 timeval_add_time(&timeout, 1, 0);
484 /* poll until rx_read is low */
485 LOG_DEBUG("polling RX");
486 for (;;)
488 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
490 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
491 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
493 if ((retval = jtag_execute_queue()) != ERROR_OK)
495 LOG_ERROR("JTAG error while writing RX");
496 return retval;
499 gettimeofday(&now, NULL);
500 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
502 LOG_ERROR("timed out writing RX register");
503 return ERROR_TARGET_TIMEOUT;
505 if (!(field0_in & 1))
506 goto done;
507 if (debug_level >= 3)
509 LOG_DEBUG("waiting 100ms");
510 alive_sleep(100); /* avoid flooding the logs */
511 } else
513 keep_alive();
516 done:
518 /* set rx_valid */
519 field2 = 0x1;
520 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
522 if ((retval = jtag_execute_queue()) != ERROR_OK)
524 LOG_ERROR("JTAG error while writing RX");
525 return retval;
528 return ERROR_OK;
531 /* send count elements of size byte to the debug handler */
532 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
534 struct xscale_common *xscale = target_to_xscale(target);
535 uint32_t t[3];
536 int bits[3];
537 int retval;
538 int done_count = 0;
540 xscale_jtag_set_instr(target->tap,
541 XSCALE_DBGRX << xscale->xscale_variant,
542 TAP_IDLE);
544 bits[0] = 3;
545 t[0] = 0;
546 bits[1] = 32;
547 t[2] = 1;
548 bits[2] = 1;
549 int endianness = target->endianness;
550 while (done_count++ < count)
552 switch (size)
554 case 4:
555 if (endianness == TARGET_LITTLE_ENDIAN)
557 t[1] = le_to_h_u32(buffer);
558 } else
560 t[1] = be_to_h_u32(buffer);
562 break;
563 case 2:
564 if (endianness == TARGET_LITTLE_ENDIAN)
566 t[1] = le_to_h_u16(buffer);
567 } else
569 t[1] = be_to_h_u16(buffer);
571 break;
572 case 1:
573 t[1] = buffer[0];
574 break;
575 default:
576 LOG_ERROR("BUG: size neither 4, 2 nor 1");
577 return ERROR_INVALID_ARGUMENTS;
579 jtag_add_dr_out(target->tap,
581 bits,
583 TAP_IDLE);
584 buffer += size;
587 if ((retval = jtag_execute_queue()) != ERROR_OK)
589 LOG_ERROR("JTAG error while sending data to debug handler");
590 return retval;
593 return ERROR_OK;
596 static int xscale_send_u32(struct target *target, uint32_t value)
598 struct xscale_common *xscale = target_to_xscale(target);
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
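/* Write the cached DCSR value to the target via SELDCSR; hold_rst and
 * ext_dbg_brk update the cached flags unless passed as -1 (keep setting). */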
604 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
606 struct xscale_common *xscale = target_to_xscale(target);
607 int retval;
608 struct scan_field fields[3];
609 uint8_t field0 = 0x0;
610 uint8_t field0_check_value = 0x2;
611 uint8_t field0_check_mask = 0x7;
612 uint8_t field2 = 0x0;
613 uint8_t field2_check_value = 0x0;
614 uint8_t field2_check_mask = 0x1;
616 if (hold_rst != -1)
617 xscale->hold_rst = hold_rst;
619 if (ext_dbg_brk != -1)
620 xscale->external_debug_break = ext_dbg_brk;
622 xscale_jtag_set_instr(target->tap,
623 XSCALE_SELDCSR << xscale->xscale_variant,
624 TAP_IDLE);
626 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
627 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
629 memset(&fields, 0, sizeof fields);
631 fields[0].num_bits = 3;
632 fields[0].out_value = &field0;
633 uint8_t tmp;
634 fields[0].in_value = &tmp;
636 fields[1].num_bits = 32;
637 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
639 fields[2].num_bits = 1;
640 fields[2].out_value = &field2;
641 uint8_t tmp2;
642 fields[2].in_value = &tmp2;
644 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
646 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
647 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
649 if ((retval = jtag_execute_queue()) != ERROR_OK)
651 LOG_ERROR("JTAG error while writing DCSR");
652 return retval;
655 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
656 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
658 return ERROR_OK;
661 /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
662 static unsigned int parity(unsigned int v)
664 // unsigned int ov = v;
665 v ^= v >> 16;
666 v ^= v >> 8;
667 v ^= v >> 4;
668 v &= 0xf;
669 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
670 return (0x6996 >> v) & 1;
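/* Load one 8-word cache line (with per-word parity bits) into the
 * mini-ICache at virtual address va, using the LDIC JTAG function. */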
673 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
675 struct xscale_common *xscale = target_to_xscale(target);
676 uint8_t packet[4];
677 uint8_t cmd;
678 int word;
679 struct scan_field fields[2];
681 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
683 /* LDIC into IR */
684 xscale_jtag_set_instr(target->tap,
685 XSCALE_LDIC << xscale->xscale_variant,
686 TAP_IDLE);
688 /* CMD is b011 to load a cacheline into the Mini ICache.
689 * Loading into the main ICache is deprecated, and unused.
690 * It's followed by three zero bits, and 27 address bits.
692 buf_set_u32(&cmd, 0, 6, 0x3);
694 /* virtual address of desired cache line */
695 buf_set_u32(packet, 0, 27, va >> 5);
697 memset(&fields, 0, sizeof fields);
699 fields[0].num_bits = 6;
700 fields[0].out_value = &cmd;
702 fields[1].num_bits = 27;
703 fields[1].out_value = packet;
705 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
707 /* rest of packet is a cacheline: 8 instructions, with parity */
708 fields[0].num_bits = 32;
709 fields[0].out_value = packet;
711 fields[1].num_bits = 1;
712 fields[1].out_value = &cmd;
714 for (word = 0; word < 8; word++)
716 buf_set_u32(packet, 0, 32, buffer[word]);
718 uint32_t value;
719 memcpy(&value, packet, sizeof(uint32_t));
720 cmd = parity(value);
722 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
725 return jtag_execute_queue();
728 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
730 struct xscale_common *xscale = target_to_xscale(target);
731 uint8_t packet[4];
732 uint8_t cmd;
733 struct scan_field fields[2];
735 xscale_jtag_set_instr(target->tap,
736 XSCALE_LDIC << xscale->xscale_variant,
737 TAP_IDLE);
739 /* CMD for invalidate IC line b000, bits [6:4] b000 */
740 buf_set_u32(&cmd, 0, 6, 0x0);
742 /* virtual address of desired cache line */
743 buf_set_u32(packet, 0, 27, va >> 5);
745 memset(&fields, 0, sizeof fields);
747 fields[0].num_bits = 6;
748 fields[0].out_value = &cmd;
750 fields[1].num_bits = 27;
751 fields[1].out_value = packet;
753 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
755 return ERROR_OK;
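/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables,
 * pointing the reset vectors at the debug handler, then invalidate and
 * reload both cache lines in the mini-ICache. */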
758 static int xscale_update_vectors(struct target *target)
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
764 uint32_t low_reset_branch, high_reset_branch;
766 for (i = 1; i < 8; i++)
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
773 else
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
786 for (i = 1; i < 8; i++)
788 if (xscale->static_low_vectors_set & (1 << i))
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
792 else
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
819 return ERROR_OK;
822 static int xscale_arch_state(struct target *target)
824 struct xscale_common *xscale = target_to_xscale(target);
825 struct arm *armv4_5 = &xscale->armv4_5_common;
827 static const char *state[] =
829 "disabled", "enabled"
832 static const char *arch_dbg_reason[] =
834 "", "\n(processor reset)", "\n(trace buffer full)"
837 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
839 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
840 return ERROR_INVALID_ARGUMENTS;
843 arm_arch_state(target);
844 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
845 state[xscale->armv4_5_mmu.mmu_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
847 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
848 arch_dbg_reason[xscale->arch_debug_reason]);
850 return ERROR_OK;
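/* Poll a running target: data pending in TX means the debug handler has
 * been entered, so mark the target halted and process the debug entry. */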
853 static int xscale_poll(struct target *target)
855 int retval = ERROR_OK;
857 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
859 enum target_state previous_state = target->state;
860 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
863 /* there's data to read from the tx register, we entered debug state */
864 target->state = TARGET_HALTED;
866 /* process debug entry, fetching current mode regs */
867 retval = xscale_debug_entry(target);
869 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
871 LOG_USER("error while polling TX register, reset CPU");
872 /* here we "lie" so GDB won't get stuck and a reset can be performed */
873 target->state = TARGET_HALTED;
876 /* debug_entry could have overwritten target state (i.e. immediate resume)
877 * don't signal event handlers in that case
879 if (target->state != TARGET_HALTED)
880 return ERROR_OK;
882 /* if target was running, signal that we halted
883 * otherwise we reentered from debug execution */
884 if (previous_state == TARGET_RUNNING)
885 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
886 else
887 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
890 return retval;
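/* Fetch the register snapshot the debug handler sends on entry (r0, pc,
 * r1-r7, cpsr, then banked registers), decode the method-of-entry bits
 * from DCSR, apply the PC fixup, and refresh cached MMU/cache state. */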
893 static int xscale_debug_entry(struct target *target)
895 struct xscale_common *xscale = target_to_xscale(target);
896 struct arm *armv4_5 = &xscale->armv4_5_common;
897 uint32_t pc;
898 uint32_t buffer[10];
899 int i;
900 int retval;
901 uint32_t moe;
903 /* clear external dbg break (will be written on next DCSR read) */
904 xscale->external_debug_break = 0;
905 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
906 return retval;
908 /* get r0, pc, r1 to r7 and cpsr */
909 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
910 return retval;
912 /* move r0 from buffer to register cache */
913 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
914 armv4_5->core_cache->reg_list[0].dirty = 1;
915 armv4_5->core_cache->reg_list[0].valid = 1;
916 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
918 /* move pc from buffer to register cache */
919 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
920 armv4_5->pc->dirty = 1;
921 armv4_5->pc->valid = 1;
922 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
924 /* move data from buffer to register cache */
925 for (i = 1; i <= 7; i++)
927 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
928 armv4_5->core_cache->reg_list[i].dirty = 1;
929 armv4_5->core_cache->reg_list[i].valid = 1;
930 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
933 arm_set_cpsr(armv4_5, buffer[9]);
934 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
936 if (!is_arm_mode(armv4_5->core_mode))
938 target->state = TARGET_UNKNOWN;
939 LOG_ERROR("cpsr contains invalid mode value - communication failure");
940 return ERROR_TARGET_FAILURE;
942 LOG_DEBUG("target entered debug state in %s mode",
943 arm_mode_name(armv4_5->core_mode));
945 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
946 if (armv4_5->spsr) {
947 xscale_receive(target, buffer, 8);
948 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
949 armv4_5->spsr->dirty = false;
950 armv4_5->spsr->valid = true;
952 else
954 /* r8 to r14, but no spsr */
955 xscale_receive(target, buffer, 7);
958 /* move data from buffer to right banked register in cache */
959 for (i = 8; i <= 14; i++)
961 struct reg *r = arm_reg_current(armv4_5, i);
963 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
964 r->dirty = false;
965 r->valid = true;
968 /* examine debug reason */
969 xscale_read_dcsr(target);
970 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
972 /* stored PC (for calculating fixup) */
973 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
975 switch (moe)
977 case 0x0: /* Processor reset */
978 target->debug_reason = DBG_REASON_DBGRQ;
979 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
980 pc -= 4;
981 break;
982 case 0x1: /* Instruction breakpoint hit */
983 target->debug_reason = DBG_REASON_BREAKPOINT;
984 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
985 pc -= 4;
986 break;
987 case 0x2: /* Data breakpoint hit */
988 target->debug_reason = DBG_REASON_WATCHPOINT;
989 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
990 pc -= 4;
991 break;
992 case 0x3: /* BKPT instruction executed */
993 target->debug_reason = DBG_REASON_BREAKPOINT;
994 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
995 pc -= 4;
996 break;
997 case 0x4: /* Ext. debug event */
998 target->debug_reason = DBG_REASON_DBGRQ;
999 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1000 pc -= 4;
1001 break;
1002 case 0x5: /* Vector trap occurred */
1003 target->debug_reason = DBG_REASON_BREAKPOINT;
1004 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1005 pc -= 4;
1006 break;
1007 case 0x6: /* Trace buffer full break */
1008 target->debug_reason = DBG_REASON_DBGRQ;
1009 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1010 pc -= 4;
1011 break;
1012 case 0x7: /* Reserved (may flag Hot-Debug support) */
1013 default:
1014 LOG_ERROR("Method of Entry is 'Reserved'");
1015 exit(-1);
1016 break;
1019 /* apply PC fixup */
1020 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1022 /* on the first debug entry, identify cache type */
1023 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1025 uint32_t cache_type_reg;
1027 /* read cp15 cache type register */
1028 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1029 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1031 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1034 /* examine MMU and Cache settings */
1035 /* read cp15 control register */
1036 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1037 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1038 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1039 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1040 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1042 /* tracing enabled, read collected trace data */
1043 if (xscale->trace.buffer_enabled)
1045 xscale_read_trace(target);
1046 xscale->trace.buffer_fill--;
1048 /* resume if we're still collecting trace data */
1049 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1050 && (xscale->trace.buffer_fill > 0))
1052 xscale_resume(target, 1, 0x0, 1, 0);
1054 else
1056 xscale->trace.buffer_enabled = 0;
1060 return ERROR_OK;
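/* Request a halt by asserting the external debug break bit in DCSR;
 * the resulting debug handler entry is picked up by xscale_poll(). */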
1063 static int xscale_halt(struct target *target)
1065 struct xscale_common *xscale = target_to_xscale(target);
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1070 if (target->state == TARGET_HALTED)
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1075 else if (target->state == TARGET_UNKNOWN)
1077 /* this must not happen for an XScale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1081 else if (target->state == TARGET_RESET)
1083 LOG_DEBUG("target->state == TARGET_RESET");
1085 else
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1091 target->debug_reason = DBG_REASON_DBGRQ;
1094 return ERROR_OK;
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1103 if (xscale->ibcr0_used)
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1107 if (ibcr0_bp)
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1111 else
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1121 return ERROR_OK;
1124 static int xscale_disable_single_step(struct target *target)
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1133 return ERROR_OK;
1136 static void xscale_enable_watchpoints(struct target *target)
1138 struct watchpoint *watchpoint = target->watchpoints;
1140 while (watchpoint)
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1148 static void xscale_enable_breakpoints(struct target *target)
1150 struct breakpoint *breakpoint = target->breakpoints;
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
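/* Resume execution: step over a breakpoint at the current PC if needed,
 * restore banked registers, then hand CPSR, r0-r7 and the PC back to the
 * debug handler with a resume (0x30/0x31) command. */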
1161 static int xscale_resume(struct target *target, int current,
1162 uint32_t address, int handle_breakpoints, int debug_execution)
1164 struct xscale_common *xscale = target_to_xscale(target);
1165 struct arm *armv4_5 = &xscale->armv4_5_common;
1166 struct breakpoint *breakpoint = target->breakpoints;
1167 uint32_t current_pc;
1168 int retval;
1169 int i;
1171 LOG_DEBUG("-");
1173 if (target->state != TARGET_HALTED)
1175 LOG_WARNING("target not halted");
1176 return ERROR_TARGET_NOT_HALTED;
1179 if (!debug_execution)
1181 target_free_all_working_areas(target);
1184 /* update vector tables */
1185 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1186 return retval;
1188 /* current = 1: continue on current pc, otherwise continue at <address> */
1189 if (!current)
1190 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1192 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1194 /* if we're at the reset vector, we have to simulate the branch */
1195 if (current_pc == 0x0)
1197 arm_simulate_step(target, NULL);
1198 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1201 /* the front-end may request us not to handle breakpoints */
1202 if (handle_breakpoints)
1204 breakpoint = breakpoint_find(target,
1205 buf_get_u32(armv4_5->pc->value, 0, 32));
1206 if (breakpoint != NULL)
1208 uint32_t next_pc;
1209 int saved_trace_buffer_enabled;
1211 /* there's a breakpoint at the current PC, we have to step over it */
1212 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1213 xscale_unset_breakpoint(target, breakpoint);
1215 /* calculate PC of next instruction */
1216 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1218 uint32_t current_opcode;
1219 target_read_u32(target, current_pc, &current_opcode);
1220 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1223 LOG_DEBUG("enable single-step");
1224 xscale_enable_single_step(target, next_pc);
1226 /* restore banked registers */
1227 retval = xscale_restore_banked(target);
1229 /* send resume request */
1230 xscale_send_u32(target, 0x30);
1232 /* send CPSR */
1233 xscale_send_u32(target,
1234 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1235 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1236 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1238 for (i = 7; i >= 0; i--)
1240 /* send register */
1241 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1242 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1245 /* send PC */
1246 xscale_send_u32(target,
1247 buf_get_u32(armv4_5->pc->value, 0, 32));
1248 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1249 buf_get_u32(armv4_5->pc->value, 0, 32));
1251 /* disable trace data collection in xscale_debug_entry() */
1252 saved_trace_buffer_enabled = xscale->trace.buffer_enabled;
1253 xscale->trace.buffer_enabled = 0;
1255 /* wait for and process debug entry */
1256 xscale_debug_entry(target);
1258 /* re-enable trace buffer, if enabled previously */
1259 xscale->trace.buffer_enabled = saved_trace_buffer_enabled;
1261 LOG_DEBUG("disable single-step");
1262 xscale_disable_single_step(target);
1264 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1265 xscale_set_breakpoint(target, breakpoint);
1269 /* enable any pending breakpoints and watchpoints */
1270 xscale_enable_breakpoints(target);
1271 xscale_enable_watchpoints(target);
1273 /* restore banked registers */
1274 retval = xscale_restore_banked(target);
1276 /* send resume request (command 0x30 or 0x31)
1277 * clean the trace buffer if it is to be enabled (0x62) */
1278 if (xscale->trace.buffer_enabled)
1280 xscale_send_u32(target, 0x62);
1281 xscale_send_u32(target, 0x31);
1283 else
1284 xscale_send_u32(target, 0x30);
1286 /* send CPSR */
1287 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1288 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1289 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1291 for (i = 7; i >= 0; i--)
1293 /* send register */
1294 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1295 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1298 /* send PC */
1299 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1300 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1301 buf_get_u32(armv4_5->pc->value, 0, 32));
1303 target->debug_reason = DBG_REASON_NOTHALTED;
1305 if (!debug_execution)
1307 /* registers are now invalid */
1308 register_cache_invalidate(armv4_5->core_cache);
1309 target->state = TARGET_RUNNING;
1310 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1312 else
1314 target->state = TARGET_DEBUG_RUNNING;
1315 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1318 LOG_DEBUG("target resumed");
1320 return ERROR_OK;
1323 static int xscale_step_inner(struct target *target, int current,
1324 uint32_t address, int handle_breakpoints)
1326 struct xscale_common *xscale = target_to_xscale(target);
1327 struct arm *armv4_5 = &xscale->armv4_5_common;
1328 uint32_t next_pc;
1329 int retval;
1330 int i;
1332 target->debug_reason = DBG_REASON_SINGLESTEP;
1334 /* calculate PC of next instruction */
1335 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1337 uint32_t current_opcode, current_pc;
1338 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1340 target_read_u32(target, current_pc, &current_opcode);
1341 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1342 return retval;
1345 LOG_DEBUG("enable single-step");
1346 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1347 return retval;
1349 /* restore banked registers */
1350 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1351 return retval;
1353 /* send resume request (command 0x30 or 0x31)
1354 * clean the trace buffer if it is to be enabled (0x62) */
1355 if (xscale->trace.buffer_enabled)
1357 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1358 return retval;
1359 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1360 return retval;
1362 else
1363 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1364 return retval;
1366 /* send CPSR */
1367 retval = xscale_send_u32(target,
1368 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1369 if (retval != ERROR_OK)
1370 return retval;
1371 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1372 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1374 for (i = 7; i >= 0; i--)
1376 /* send register */
1377 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1378 return retval;
1379 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1382 /* send PC */
1383 retval = xscale_send_u32(target,
1384 buf_get_u32(armv4_5->pc->value, 0, 32));
1385 if (retval != ERROR_OK)
1386 return retval;
1387 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1388 buf_get_u32(armv4_5->pc->value, 0, 32));
1390 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1392 /* registers are now invalid */
1393 register_cache_invalidate(armv4_5->core_cache);
1395 /* wait for and process debug entry */
1396 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1397 return retval;
1399 LOG_DEBUG("disable single-step");
1400 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1401 return retval;
1403 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1405 return ERROR_OK;
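/* Single-step one instruction, temporarily removing a breakpoint at the
 * current PC; stepping from the reset vector is simulated instead. */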
1408 static int xscale_step(struct target *target, int current,
1409 uint32_t address, int handle_breakpoints)
1411 struct arm *armv4_5 = target_to_arm(target);
1412 struct breakpoint *breakpoint = NULL;
1414 uint32_t current_pc;
1415 int retval;
1417 if (target->state != TARGET_HALTED)
1419 LOG_WARNING("target not halted");
1420 return ERROR_TARGET_NOT_HALTED;
1423 /* current = 1: continue on current pc, otherwise continue at <address> */
1424 if (!current)
1425 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1427 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1429 /* if we're at the reset vector, we have to simulate the step */
1430 if (current_pc == 0x0)
1432 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1433 return retval;
1434 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1436 target->debug_reason = DBG_REASON_SINGLESTEP;
1437 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1439 return ERROR_OK;
1442 /* the front-end may request us not to handle breakpoints */
1443 if (handle_breakpoints)
1444 breakpoint = breakpoint_find(target,
1445 buf_get_u32(armv4_5->pc->value, 0, 32));
1446 if (breakpoint != NULL) {
1447 retval = xscale_unset_breakpoint(target, breakpoint);
1448 if (retval != ERROR_OK)
1449 return retval;
1452 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1454 if (breakpoint)
1456 xscale_set_breakpoint(target, breakpoint);
1459 LOG_DEBUG("target stepped");
1461 return ERROR_OK;
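/* Assert reset while keeping the core trapped: program DCSR for hold-reset
 * and trap-reset, select BYPASS (DCSR selected caused problems on PXA27x),
 * then assert SRST. */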
1465 static int xscale_assert_reset(struct target *target)
1467 struct xscale_common *xscale = target_to_xscale(target);
1469 LOG_DEBUG("target->state: %s",
1470 target_state_name(target));
1472 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1473 * end up in T-L-R, which would reset JTAG
1475 xscale_jtag_set_instr(target->tap,
1476 XSCALE_SELDCSR << xscale->xscale_variant,
1477 TAP_IDLE);
1479 /* set Hold reset, Halt mode and Trap Reset */
1480 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1481 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1482 xscale_write_dcsr(target, 1, 0);
1484 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1485 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1486 jtag_execute_queue();
1488 /* assert reset */
1489 jtag_add_reset(0, 1);
1491 /* sleep 1ms, to be sure we fulfill any requirements */
1492 jtag_add_sleep(1000);
1493 jtag_execute_queue();
1495 target->state = TARGET_RESET;
1497 if (target->reset_halt)
1499 int retval;
1500 if ((retval = target_halt(target)) != ERROR_OK)
1501 return retval;
1504 return ERROR_OK;
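/* Release SRST, reload the debug handler and exception vectors into the
 * mini-ICache, then let the core run into the handler (and resume it if
 * reset_halt was not requested). */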
1507 static int xscale_deassert_reset(struct target *target)
1509 struct xscale_common *xscale = target_to_xscale(target);
1510 struct breakpoint *breakpoint = target->breakpoints;
1512 LOG_DEBUG("-");
1514 xscale->ibcr_available = 2;
1515 xscale->ibcr0_used = 0;
1516 xscale->ibcr1_used = 0;
1518 xscale->dbr_available = 2;
1519 xscale->dbr0_used = 0;
1520 xscale->dbr1_used = 0;
1522 /* mark all hardware breakpoints as unset */
1523 while (breakpoint)
1525 if (breakpoint->type == BKPT_HARD)
1527 breakpoint->set = 0;
1529 breakpoint = breakpoint->next;
1532 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1534 /* FIXME mark hardware watchpoints got unset too. Also,
1535 * at least some of the XScale registers are invalid...
1539 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1540 * contents got invalidated. Safer to force that, so writing new
1541 * contents can't ever fail..
1544 uint32_t address;
1545 unsigned buf_cnt;
1546 const uint8_t *buffer = xscale_debug_handler;
1547 int retval;
1549 /* release SRST */
1550 jtag_add_reset(0, 0);
1552 /* wait 300ms; 150 and 100ms were not enough */
1553 jtag_add_sleep(300*1000);
1555 jtag_add_runtest(2030, TAP_IDLE);
1556 jtag_execute_queue();
1558 /* set Hold reset, Halt mode and Trap Reset */
1559 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1560 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1561 xscale_write_dcsr(target, 1, 0);
1563 /* Load the debug handler into the mini-icache. Since
1564 * it's using halt mode (not monitor mode), it runs in
1565 * "Special Debug State" for access to registers, memory,
1566 * coprocessors, trace data, etc.
1568 address = xscale->handler_address;
1569 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1570 binary_size > 0;
1571 binary_size -= buf_cnt, buffer += buf_cnt)
1573 uint32_t cache_line[8];
1574 unsigned i;
1576 buf_cnt = binary_size;
1577 if (buf_cnt > 32)
1578 buf_cnt = 32;
1580 for (i = 0; i < buf_cnt; i += 4)
1582 /* convert LE buffer to host-endian uint32_t */
1583 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1586 for (; i < 32; i += 4)
1588 cache_line[i / 4] = 0xe1a08008;
1591 /* only load addresses other than the reset vectors */
1592 if ((address % 0x400) != 0x0)
1594 retval = xscale_load_ic(target, address,
1595 cache_line);
1596 if (retval != ERROR_OK)
1597 return retval;
1600 address += buf_cnt;
1603 retval = xscale_load_ic(target, 0x0,
1604 xscale->low_vectors);
1605 if (retval != ERROR_OK)
1606 return retval;
1607 retval = xscale_load_ic(target, 0xffff0000,
1608 xscale->high_vectors);
1609 if (retval != ERROR_OK)
1610 return retval;
1612 jtag_add_runtest(30, TAP_IDLE);
1614 jtag_add_sleep(100000);
1616 /* set Hold reset, Halt mode and Trap Reset */
1617 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1619 xscale_write_dcsr(target, 1, 0);
1621 /* clear Hold reset to let the target run (should enter debug handler) */
1622 xscale_write_dcsr(target, 0, 1);
1623 target->state = TARGET_RUNNING;
1625 if (!target->reset_halt)
1627 jtag_add_sleep(10000);
1629 /* we should have entered debug now */
1630 xscale_debug_entry(target);
1631 target->state = TARGET_HALTED;
1633 /* resume the target */
1634 xscale_resume(target, 1, 0x0, 1, 0);
1638 return ERROR_OK;
1641 static int xscale_read_core_reg(struct target *target, struct reg *r,
1642 int num, enum arm_mode mode)
1644 /** \todo add debug handler support for core register reads */
1645 LOG_ERROR("not implemented");
1646 return ERROR_OK;
1649 static int xscale_write_core_reg(struct target *target, struct reg *r,
1650 int num, enum arm_mode mode, uint32_t value)
1652 /** \todo add debug handler support for core register writes */
1653 LOG_ERROR("not implemented");
1654 return ERROR_OK;
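/* Read every banked register set (FIQ, IRQ, SVC, ABT, UND, SYS) from the
 * debug handler into the register cache, skipping modes whose registers
 * are already valid. */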
1657 static int xscale_full_context(struct target *target)
1659 struct arm *armv4_5 = target_to_arm(target);
1661 uint32_t *buffer;
1663 int i, j;
1665 LOG_DEBUG("-");
1667 if (target->state != TARGET_HALTED)
1669 LOG_WARNING("target not halted");
1670 return ERROR_TARGET_NOT_HALTED;
1673 buffer = malloc(4 * 8);
1675 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1676 * we can't enter User mode on an XScale (unpredictable),
1677 * but User shares registers with SYS
1679 for (i = 1; i < 7; i++)
1681 enum arm_mode mode = armv4_5_number_to_mode(i);
1682 bool valid = true;
1683 struct reg *r;
1685 if (mode == ARM_MODE_USR)
1686 continue;
1688 /* check if there are invalid registers in the current mode
1690 for (j = 0; valid && j <= 16; j++)
1692 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1693 mode, j).valid)
1694 valid = false;
1696 if (valid)
1697 continue;
1699 /* request banked registers */
1700 xscale_send_u32(target, 0x0);
1702 /* send CPSR for desired bank mode */
1703 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1705 /* get banked registers: r8 to r14; and SPSR
1706 * except in USR/SYS mode
1708 if (mode != ARM_MODE_SYS) {
1709 /* SPSR */
1710 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1711 mode, 16);
1713 xscale_receive(target, buffer, 8);
1715 buf_set_u32(r->value, 0, 32, buffer[7]);
1716 r->dirty = false;
1717 r->valid = true;
1718 } else {
1719 xscale_receive(target, buffer, 7);
1722 /* move data from buffer to register cache */
1723 for (j = 8; j <= 14; j++)
1725 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1726 mode, j);
1728 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1729 r->dirty = false;
1730 r->valid = true;
1734 free(buffer);
1736 return ERROR_OK;
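/* Write back any dirty banked registers (r8-r14 and SPSR) for each
 * processor mode before resuming the target. */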
1739 static int xscale_restore_banked(struct target *target)
1741 struct arm *armv4_5 = target_to_arm(target);
1743 int i, j;
1745 if (target->state != TARGET_HALTED)
1747 LOG_WARNING("target not halted");
1748 return ERROR_TARGET_NOT_HALTED;
1751 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1752 * and check if any banked registers need to be written. Ignore
1753 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1754 * an XScale (unpredictable), but they share all registers.
1756 for (i = 1; i < 7; i++)
1758 enum arm_mode mode = armv4_5_number_to_mode(i);
1759 struct reg *r;
1761 if (mode == ARM_MODE_USR)
1762 continue;
1764 /* check if there are dirty registers in this mode */
1765 for (j = 8; j <= 14; j++)
1767 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1768 mode, j).dirty)
1769 goto dirty;
1772 /* if not USR/SYS, check if the SPSR needs to be written */
1773 if (mode != ARM_MODE_SYS)
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1776 mode, 16).dirty)
1777 goto dirty;
1780 /* there's nothing to flush for this mode */
1781 continue;
1783 dirty:
1784 /* command 0x1: "send banked registers" */
1785 xscale_send_u32(target, 0x1);
1787 /* send CPSR for desired mode */
1788 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1790 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1791 * but this protocol doesn't understand that nuance.
1793 for (j = 8; j <= 14; j++) {
1794 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1795 mode, j);
1796 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1797 r->dirty = false;
1800 /* send spsr if not in USR/SYS mode */
1801 if (mode != ARM_MODE_SYS) {
1802 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1803 mode, 16);
1804 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1805 r->dirty = false;
1809 return ERROR_OK;
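/* Read memory through the debug handler: send a 0x1n read request with
 * base address and word count, receive the data, then check DCSR for a
 * sticky abort. */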
1812 static int xscale_read_memory(struct target *target, uint32_t address,
1813 uint32_t size, uint32_t count, uint8_t *buffer)
1815 struct xscale_common *xscale = target_to_xscale(target);
1816 uint32_t *buf32;
1817 uint32_t i;
1818 int retval;
1820 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1822 if (target->state != TARGET_HALTED)
1824 LOG_WARNING("target not halted");
1825 return ERROR_TARGET_NOT_HALTED;
1828 /* sanitize arguments */
1829 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1830 return ERROR_INVALID_ARGUMENTS;
1832 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1833 return ERROR_TARGET_UNALIGNED_ACCESS;
1835 /* send memory read request (command 0x1n, n: access size) */
1836 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1837 return retval;
1839 /* send base address for read request */
1840 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1841 return retval;
1843 /* send number of requested data words */
1844 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1845 return retval;
1847 /* receive data from target (count times 32-bit words in host endianness) */
1848 buf32 = malloc(4 * count);
1849 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1850 return retval;
1852 /* extract data from host-endian buffer into byte stream */
1853 for (i = 0; i < count; i++)
1855 switch (size)
1857 case 4:
1858 target_buffer_set_u32(target, buffer, buf32[i]);
1859 buffer += 4;
1860 break;
1861 case 2:
1862 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1863 buffer += 2;
1864 break;
1865 case 1:
1866 *buffer++ = buf32[i] & 0xff;
1867 break;
1868 default:
1869 LOG_ERROR("invalid read size");
1870 return ERROR_INVALID_ARGUMENTS;
1874 free(buf32);
1876 /* examine DCSR, to see if Sticky Abort (SA) got set */
1877 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1878 return retval;
1879 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1881 /* clear SA bit */
1882 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1883 return retval;
1885 return ERROR_TARGET_DATA_ABORT;
1888 return ERROR_OK;
1891 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1892 uint32_t size, uint32_t count, uint8_t *buffer)
1894 struct xscale_common *xscale = target_to_xscale(target);
1896 /* with MMU inactive, there are only physical addresses */
1897 if (!xscale->armv4_5_mmu.mmu_enabled)
1898 return xscale_read_memory(target, address, size, count, buffer);
1900 /** \todo: provide a non-stub implementation of this routine. */
1901 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1902 target_name(target), __func__);
1903 return ERROR_FAIL;
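/* Write memory through the debug handler: send a 0x2n write request with
 * base address and count, stream the data with xscale_send(), then check
 * DCSR for a sticky abort. */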
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1914 if (target->state != TARGET_HALTED)
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1931 /* send base address for write request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1935 /* send number of requested data words to be written */
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1939 /* extract data from host-endian buffer into byte stream */
1940 #if 0
1941 for (i = 0; i < count; i++)
1943 switch (size)
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1978 return ERROR_TARGET_DATA_ABORT;
1981 return ERROR_OK;
1984 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1985 uint32_t size, uint32_t count, uint8_t *buffer)
1987 struct xscale_common *xscale = target_to_xscale(target);
1989 /* with MMU inactive, there are only physical addresses */
1990 if (!xscale->armv4_5_mmu.mmu_enabled)
1991 return xscale_write_memory(target, address, size, count, buffer);
1993 /** \todo: provide a non-stub implementation of this routine. */
1994 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1995 target_name(target), __func__);
1996 return ERROR_FAIL;
1999 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2000 uint32_t count, uint8_t *buffer)
2002 return xscale_write_memory(target, address, 4, count, buffer);
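/* Fetch the CP15 translation table base (TTB) register and return its value. */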
2005 static uint32_t xscale_get_ttb(struct target *target)
2007 struct xscale_common *xscale = target_to_xscale(target);
2008 uint32_t ttb;
2010 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2011 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2013 return ttb;
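/* Clear the requested MMU/cache enable bits in the CP15 control register,
 * cleaning and invalidating the caches first via debug handler commands. */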
2016 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2017 int d_u_cache, int i_cache)
2019 struct xscale_common *xscale = target_to_xscale(target);
2020 uint32_t cp15_control;
2022 /* read cp15 control register */
2023 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2024 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2026 if (mmu)
2027 cp15_control &= ~0x1U;
2029 if (d_u_cache)
2031 /* clean DCache */
2032 xscale_send_u32(target, 0x50);
2033 xscale_send_u32(target, xscale->cache_clean_address);
2035 /* invalidate DCache */
2036 xscale_send_u32(target, 0x51);
2038 cp15_control &= ~0x4U;
2041 if (i_cache)
2043 /* invalidate ICache */
2044 xscale_send_u32(target, 0x52);
2045 cp15_control &= ~0x1000U;
2048 /* write new cp15 control register */
2049 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2051 /* execute cpwait to ensure outstanding operations complete */
2052 xscale_send_u32(target, 0x53);
2055 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2056 int d_u_cache, int i_cache)
2058 struct xscale_common *xscale = target_to_xscale(target);
2059 uint32_t cp15_control;
2061 /* read cp15 control register */
2062 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2063 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2065 if (mmu)
2066 cp15_control |= 0x1U;
2068 if (d_u_cache)
2069 cp15_control |= 0x4U;
2071 if (i_cache)
2072 cp15_control |= 0x1000U;
2074 /* write new cp15 control register */
2075 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2077 /* execute cpwait to ensure outstanding operations complete */
2078 xscale_send_u32(target, 0x53);
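/* Hardware breakpoints use the two instruction breakpoint registers
 * IBCR0/IBCR1; writing the breakpoint address with bit 0 set enables the
 * comparator.  Software breakpoints replace the original instruction with an
 * ARM or Thumb BKPT opcode and then clean/invalidate the caches so the core
 * fetches the modified instruction. */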
2081 static int xscale_set_breakpoint(struct target *target,
2082 struct breakpoint *breakpoint)
2084 int retval;
2085 struct xscale_common *xscale = target_to_xscale(target);
2087 if (target->state != TARGET_HALTED)
2089 LOG_WARNING("target not halted");
2090 return ERROR_TARGET_NOT_HALTED;
2093 if (breakpoint->set)
2095 LOG_WARNING("breakpoint already set");
2096 return ERROR_OK;
2099 if (breakpoint->type == BKPT_HARD)
2101 uint32_t value = breakpoint->address | 1;
2102 if (!xscale->ibcr0_used)
2104 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2105 xscale->ibcr0_used = 1;
2106 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2108 else if (!xscale->ibcr1_used)
2110 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2111 xscale->ibcr1_used = 1;
2112 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2114 else
2116 LOG_ERROR("BUG: no hardware comparator available");
2117 return ERROR_OK;
2120 else if (breakpoint->type == BKPT_SOFT)
2122 if (breakpoint->length == 4)
2124 /* keep the original instruction in target endianness */
2125 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2127 return retval;
2130 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2130 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2132 return retval;
2135 else
2137 /* keep the original instruction in target endianness */
2138 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2140 return retval;
2142 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2143 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2145 return retval;
2148 breakpoint->set = 1;
2150 xscale_send_u32(target, 0x50); /* clean dcache */
2151 xscale_send_u32(target, xscale->cache_clean_address);
2152 xscale_send_u32(target, 0x51); /* invalidate dcache */
2153 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2156 return ERROR_OK;
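/* add_breakpoint only validates the request and reserves a hardware
 * comparator when needed; the breakpoint itself is programmed later by
 * xscale_set_breakpoint while the target is halted. */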
2159 static int xscale_add_breakpoint(struct target *target,
2160 struct breakpoint *breakpoint)
2162 struct xscale_common *xscale = target_to_xscale(target);
2164 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2166 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2167 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2170 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2172 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes in length are supported");
2173 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2176 if (breakpoint->type == BKPT_HARD)
2178 xscale->ibcr_available--;
2181 return ERROR_OK;
2184 static int xscale_unset_breakpoint(struct target *target,
2185 struct breakpoint *breakpoint)
2187 int retval;
2188 struct xscale_common *xscale = target_to_xscale(target);
2190 if (target->state != TARGET_HALTED)
2192 LOG_WARNING("target not halted");
2193 return ERROR_TARGET_NOT_HALTED;
2196 if (!breakpoint->set)
2198 LOG_WARNING("breakpoint not set");
2199 return ERROR_OK;
2202 if (breakpoint->type == BKPT_HARD)
2204 if (breakpoint->set == 1)
2206 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2207 xscale->ibcr0_used = 0;
2209 else if (breakpoint->set == 2)
2211 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2212 xscale->ibcr1_used = 0;
2214 breakpoint->set = 0;
2216 else
2218 /* restore original instruction (kept in target endianness) */
2219 if (breakpoint->length == 4)
2221 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2223 return retval;
2226 else
2228 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2230 return retval;
2233 breakpoint->set = 0;
2235 xscale_send_u32(target, 0x50); /* clean dcache */
2236 xscale_send_u32(target, xscale->cache_clean_address);
2237 xscale_send_u32(target, 0x51); /* invalidate dcache */
2238 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2241 return ERROR_OK;
2244 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2246 struct xscale_common *xscale = target_to_xscale(target);
2248 if (target->state != TARGET_HALTED)
2250 LOG_WARNING("target not halted");
2251 return ERROR_TARGET_NOT_HALTED;
2254 if (breakpoint->set)
2256 xscale_unset_breakpoint(target, breakpoint);
2259 if (breakpoint->type == BKPT_HARD)
2260 xscale->ibcr_available++;
2262 return ERROR_OK;
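/* Data watchpoints use DBR0/DBR1 together with the DBCON control register.
 * As programmed here, DBCON[1:0] selects the mode for DBR0 (0x1 store,
 * 0x2 any access, 0x3 load), DBCON[3:2] does the same for DBR1, and
 * DBCON[8] (the "M" bit) turns DBR1 into an address mask so a single
 * watchpoint can cover a power-of-two sized region. */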
2265 static int xscale_set_watchpoint(struct target *target,
2266 struct watchpoint *watchpoint)
2268 struct xscale_common *xscale = target_to_xscale(target);
2269 uint32_t enable = 0;
2270 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2271 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2273 if (target->state != TARGET_HALTED)
2275 LOG_WARNING("target not halted");
2276 return ERROR_TARGET_NOT_HALTED;
2279 switch (watchpoint->rw)
2281 case WPT_READ:
2282 enable = 0x3;
2283 break;
2284 case WPT_ACCESS:
2285 enable = 0x2;
2286 break;
2287 case WPT_WRITE:
2288 enable = 0x1;
2289 break;
2290 default:
2291 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2294 /* For a watchpoint spanning more than one word, both DBR registers must
2295 be used, with the second serving as an address mask. */
2296 if (watchpoint->length > 4)
2298 if (xscale->dbr0_used || xscale->dbr1_used)
2300 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2301 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2304 /* Write mask value to DBR1, based on the length argument.
2305 * Address bits ignored by the comparator are those set in mask. */
2306 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2307 watchpoint->length - 1);
2308 xscale->dbr1_used = 1;
2309 enable |= 0x100; /* DBCON[M] */
2312 if (!xscale->dbr0_used)
2314 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2315 dbcon_value |= enable;
2316 xscale_set_reg_u32(dbcon, dbcon_value);
2317 watchpoint->set = 1;
2318 xscale->dbr0_used = 1;
2320 else if (!xscale->dbr1_used)
2322 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2323 dbcon_value |= enable << 2;
2324 xscale_set_reg_u32(dbcon, dbcon_value);
2325 watchpoint->set = 2;
2326 xscale->dbr1_used = 1;
2328 else
2330 LOG_ERROR("BUG: no hardware comparator available");
2331 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2334 return ERROR_OK;
2337 static int xscale_add_watchpoint(struct target *target,
2338 struct watchpoint *watchpoint)
2340 struct xscale_common *xscale = target_to_xscale(target);
2342 if (xscale->dbr_available < 1)
2344 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2347 if (watchpoint->value)
2348 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2350 /* check that length is a power of two */
2351 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2353 if (len % 2)
2355 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2356 return ERROR_COMMAND_ARGUMENT_INVALID;
2360 if (watchpoint->length == 4) /* single word watchpoint */
2362 xscale->dbr_available--; /* one DBR reg used */
2363 return ERROR_OK;
2366 /* watchpoints across multiple words require both DBR registers */
2367 if (xscale->dbr_available < 2)
2368 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2370 xscale->dbr_available = 0;
2371 return ERROR_OK;
2374 static int xscale_unset_watchpoint(struct target *target,
2375 struct watchpoint *watchpoint)
2377 struct xscale_common *xscale = target_to_xscale(target);
2378 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2379 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2381 if (target->state != TARGET_HALTED)
2383 LOG_WARNING("target not halted");
2384 return ERROR_TARGET_NOT_HALTED;
2387 if (!watchpoint->set)
2389 LOG_WARNING("watchpoint not set");
2390 return ERROR_OK;
2393 if (watchpoint->set == 1)
2395 if (watchpoint->length > 4)
2397 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2398 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2400 else
2401 dbcon_value &= ~0x3;
2403 xscale_set_reg_u32(dbcon, dbcon_value);
2404 xscale->dbr0_used = 0;
2406 else if (watchpoint->set == 2)
2408 dbcon_value &= ~0xc;
2409 xscale_set_reg_u32(dbcon, dbcon_value);
2410 xscale->dbr1_used = 0;
2412 watchpoint->set = 0;
2414 return ERROR_OK;
2417 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2419 struct xscale_common *xscale = target_to_xscale(target);
2421 if (target->state != TARGET_HALTED)
2423 LOG_WARNING("target not halted");
2424 return ERROR_TARGET_NOT_HALTED;
2427 if (watchpoint->set)
2429 xscale_unset_watchpoint(target, watchpoint);
2432 if (watchpoint->length > 4)
2433 xscale->dbr_available++; /* both DBR regs now available */
2435 xscale->dbr_available++;
2437 return ERROR_OK;
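/* Register access: DCSR, TX and RX are reachable directly over JTAG.  All
 * other debug/CP registers go through the debug handler, using command 0x40
 * for reads and 0x41 for writes, each followed by the handler's register
 * number (and, for writes, the new value), as implemented below. */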
2440 static int xscale_get_reg(struct reg *reg)
2442 struct xscale_reg *arch_info = reg->arch_info;
2443 struct target *target = arch_info->target;
2444 struct xscale_common *xscale = target_to_xscale(target);
2446 /* DCSR, TX and RX are accessible via JTAG */
2447 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2449 return xscale_read_dcsr(arch_info->target);
2451 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2453 /* 1 = consume register content */
2454 return xscale_read_tx(arch_info->target, 1);
2456 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2458 /* can't read from RX register (host -> debug handler) */
2459 return ERROR_OK;
2461 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2463 /* can't (explicitly) read from TXRXCTRL register */
2464 return ERROR_OK;
2466 else /* Other DBG registers have to be transferred by the debug handler */
2468 /* send CP read request (command 0x40) */
2469 xscale_send_u32(target, 0x40);
2471 /* send CP register number */
2472 xscale_send_u32(target, arch_info->dbg_handler_number);
2474 /* read register value */
2475 xscale_read_tx(target, 1);
2476 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2478 reg->dirty = 0;
2479 reg->valid = 1;
2482 return ERROR_OK;
2485 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2487 struct xscale_reg *arch_info = reg->arch_info;
2488 struct target *target = arch_info->target;
2489 struct xscale_common *xscale = target_to_xscale(target);
2490 uint32_t value = buf_get_u32(buf, 0, 32);
2492 /* DCSR, TX and RX are accessible via JTAG */
2493 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2495 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2496 return xscale_write_dcsr(arch_info->target, -1, -1);
2498 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2500 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2501 return xscale_write_rx(arch_info->target);
2503 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2505 /* can't write to TX register (debug-handler -> host) */
2506 return ERROR_OK;
2508 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2510 /* can't (explicitly) write to TXRXCTRL register */
2511 return ERROR_OK;
2513 else /* Other DBG registers have to be transferred by the debug handler */
2515 /* send CP write request (command 0x41) */
2516 xscale_send_u32(target, 0x41);
2518 /* send CP register number */
2519 xscale_send_u32(target, arch_info->dbg_handler_number);
2521 /* send CP register value */
2522 xscale_send_u32(target, value);
2523 buf_set_u32(reg->value, 0, 32, value);
2526 return ERROR_OK;
2529 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2531 struct xscale_common *xscale = target_to_xscale(target);
2532 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2533 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2535 /* send CP write request (command 0x41) */
2536 xscale_send_u32(target, 0x41);
2538 /* send CP register number */
2539 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2541 /* send CP register value */
2542 xscale_send_u32(target, value);
2543 buf_set_u32(dcsr->value, 0, 32, value);
2545 return ERROR_OK;
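/* The handler's "read trace buffer" command (0x61) returns 258 words:
 * 256 trace buffer entries followed by the two checkpoint registers.
 * The buffer is parsed backwards because the four address bytes belonging
 * to an indirect branch are stored before the branch's message byte. */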
2548 static int xscale_read_trace(struct target *target)
2550 struct xscale_common *xscale = target_to_xscale(target);
2551 struct arm *armv4_5 = &xscale->armv4_5_common;
2552 struct xscale_trace_data **trace_data_p;
2554 /* 258 words from debug handler
2555 * 256 trace buffer entries
2556 * 2 checkpoint addresses
2558 uint32_t trace_buffer[258];
2559 int is_address[256];
2560 int i, j;
2561 unsigned int num_checkpoints = 0;
2563 if (target->state != TARGET_HALTED)
2565 LOG_WARNING("target must be stopped to read trace data");
2566 return ERROR_TARGET_NOT_HALTED;
2569 /* send read trace buffer command (command 0x61) */
2570 xscale_send_u32(target, 0x61);
2572 /* receive trace buffer content */
2573 xscale_receive(target, trace_buffer, 258);
2575 /* parse buffer backwards to identify address entries */
2576 for (i = 255; i >= 0; i--)
2578 /* also count number of checkpointed entries */
2579 if ((trace_buffer[i] & 0xe0) == 0xc0)
2580 num_checkpoints++;
2582 is_address[i] = 0;
2583 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2584 ((trace_buffer[i] & 0xf0) == 0xd0))
2586 if (i > 0)
2587 is_address[--i] = 1;
2588 if (i > 0)
2589 is_address[--i] = 1;
2590 if (i > 0)
2591 is_address[--i] = 1;
2592 if (i > 0)
2593 is_address[--i] = 1;
2598 /* search first non-zero entry that is not part of an address */
2599 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2602 if (j == 256)
2604 LOG_DEBUG("no trace data collected");
2605 return ERROR_XSCALE_NO_TRACE_DATA;
2608 /* account for possible partial address at buffer start (wrap mode only) */
2609 if (is_address[0])
2610 { /* first entry is address; complete set of 4? */
2611 i = 1;
2612 while (i < 4)
2613 if (!is_address[i++])
2614 break;
2615 if (i < 4)
2616 j += i; /* partial address; can't use it */
2619 /* if first valid entry is indirect branch, can't use that either (no address) */
2620 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2621 j++;
2623 /* walk linked list to terminating entry */
2624 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2627 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2628 (*trace_data_p)->next = NULL;
2629 (*trace_data_p)->chkpt0 = trace_buffer[256];
2630 (*trace_data_p)->chkpt1 = trace_buffer[257];
2631 (*trace_data_p)->last_instruction =
2632 buf_get_u32(armv4_5->pc->value, 0, 32);
2633 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2634 (*trace_data_p)->depth = 256 - j;
2635 (*trace_data_p)->num_checkpoints = num_checkpoints;
2637 for (i = j; i < 256; i++)
2639 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2640 if (is_address[i])
2641 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2642 else
2643 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2646 return ERROR_OK;
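/* Look up an instruction in the image loaded with 'xscale trace_image':
 * find the section containing pc, read 4 (ARM) or 2 (Thumb) bytes and decode
 * them with the common ARM/Thumb opcode evaluators. */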
2649 static int xscale_read_instruction(struct target *target, uint32_t pc,
2650 struct arm_instruction *instruction)
2652 struct xscale_common *const xscale = target_to_xscale(target);
2653 int i;
2654 int section = -1;
2655 size_t size_read;
2656 uint32_t opcode;
2657 int retval;
2659 if (!xscale->trace.image)
2660 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2662 /* search for the section the current instruction belongs to */
2663 for (i = 0; i < xscale->trace.image->num_sections; i++)
2665 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2666 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2668 section = i;
2669 break;
2673 if (section == -1)
2675 /* current instruction couldn't be found in the image */
2676 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2679 if (xscale->trace.core_state == ARM_STATE_ARM)
2681 uint8_t buf[4];
2682 if ((retval = image_read_section(xscale->trace.image, section,
2683 pc - xscale->trace.image->sections[section].base_address,
2684 4, buf, &size_read)) != ERROR_OK)
2686 LOG_ERROR("error while reading instruction: %i", retval);
2687 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2689 opcode = target_buffer_get_u32(target, buf);
2690 arm_evaluate_opcode(opcode, pc, instruction);
2692 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2694 uint8_t buf[2];
2695 if ((retval = image_read_section(xscale->trace.image, section,
2696 pc - xscale->trace.image->sections[section].base_address,
2697 2, buf, &size_read)) != ERROR_OK)
2699 LOG_ERROR("error while reading instruction: %i", retval);
2700 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2702 opcode = target_buffer_get_u16(target, buf);
2703 thumb_evaluate_opcode(opcode, pc, instruction);
2705 else
2707 LOG_ERROR("BUG: unknown core state encountered");
2708 exit(-1);
2711 return ERROR_OK;
2714 /* Extract address encoded into trace data.
2715 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2716 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2717 int i, uint32_t *target)
2719 /* if there are fewer than four entries prior to the indirect branch message
2720 * we can't extract the address */
2721 if (i < 4)
2722 *target = 0;
2723 else
2724 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2725 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
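/* For illustration (values hypothetical): with entries[i-4..i-1].data equal
 * to 0xa0, 0x00, 0x10, 0x00 the reconstructed branch target is
 * 0x00 | (0x10 << 8) | (0x00 << 16) | (0xa0 << 24) = 0xa0001000. */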
2728 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2729 struct arm_instruction *instruction,
2730 struct command_context *cmd_ctx)
2732 int retval = xscale_read_instruction(target, pc, instruction);
2733 if (retval == ERROR_OK)
2734 command_print(cmd_ctx, "%s", instruction->text);
2735 else
2736 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
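/* Trace reconstruction: the upper nybble of each message byte gives the
 * entry type handled in the switch below (0-7 exception, 8 direct branch,
 * 9 indirect branch, 12/13 checkpointed direct/indirect branch, 15
 * roll-over), and the lower nybble gives the number of instructions executed
 * since the previous message.  Instruction text comes from the trace image,
 * so the output is only as good as the image supplied. */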
2739 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2741 struct xscale_common *xscale = target_to_xscale(target);
2742 struct xscale_trace_data *trace_data = xscale->trace.data;
2743 int i, retval;
2744 uint32_t breakpoint_pc;
2745 struct arm_instruction instruction;
2746 uint32_t current_pc = 0; /* initialized when address determined */
2748 if (!xscale->trace.image)
2749 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2751 /* loop for each trace buffer that was loaded from target */
2752 while (trace_data)
2754 int chkpt = 0; /* incremented as checkpointed entries found */
2755 int j;
2757 /* FIXME: set this to correct mode when trace buffer is first enabled */
2758 xscale->trace.core_state = ARM_STATE_ARM;
2760 /* loop for each entry in this trace buffer */
2761 for (i = 0; i < trace_data->depth; i++)
2763 int exception = 0;
2764 uint32_t chkpt_reg = 0x0;
2765 uint32_t branch_target = 0;
2766 int count;
2768 /* trace entry type is upper nybble of 'message byte' */
2769 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2771 /* Target addresses of indirect branches are written into buffer
2772 * before the message byte representing the branch. Skip past it */
2773 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2774 continue;
2776 switch (trace_msg_type)
2778 case 0: /* Exceptions */
2779 case 1:
2780 case 2:
2781 case 3:
2782 case 4:
2783 case 5:
2784 case 6:
2785 case 7:
2786 exception = (trace_data->entries[i].data & 0x70) >> 4;
2788 /* FIXME: vector table may be at ffff0000 */
2789 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2790 break;
2792 case 8: /* Direct Branch */
2793 break;
2795 case 9: /* Indirect Branch */
2796 xscale_branch_address(trace_data, i, &branch_target);
2797 break;
2799 case 13: /* Checkpointed Indirect Branch */
2800 xscale_branch_address(trace_data, i, &branch_target);
2801 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2802 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2803 else
2804 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2806 chkpt++;
2807 break;
2809 case 12: /* Checkpointed Direct Branch */
2810 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2811 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2812 else
2813 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2815 /* if no current_pc, checkpoint will be starting point */
2816 if (current_pc == 0)
2817 branch_target = chkpt_reg;
2819 chkpt++;
2820 break;
2822 case 15: /* Roll-over */
2823 break;
2825 default: /* Reserved */
2826 LOG_WARNING("trace is suspect: invalid trace message byte");
2827 continue;
2831 /* If we don't have the current_pc yet, but we did get the branch target
2832 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2833 * then we can start displaying instructions at the next iteration, with
2834 * branch_target as the starting point.
2836 if (current_pc == 0)
2838 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2839 continue;
2842 /* We have current_pc. Read and display the instructions from the image.
2843 * First, display count instructions (lower nybble of message byte). */
2844 count = trace_data->entries[i].data & 0x0f;
2845 for (j = 0; j < count; j++)
2847 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2848 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2851 /* An additional instruction is implicitly added to count for
2852 * rollover and some exceptions: undef, swi, prefetch abort. */
2853 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2855 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2856 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2859 if (trace_msg_type == 15) /* rollover */
2860 continue;
2862 if (exception)
2864 command_print(cmd_ctx, "--- exception %i ---", exception);
2865 continue;
2868 /* not exception or rollover; next instruction is a branch and is
2869 * not included in the count */
2870 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2872 /* for direct branches, extract branch destination from instruction */
2873 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2875 retval = xscale_read_instruction(target, current_pc, &instruction);
2876 if (retval == ERROR_OK)
2877 current_pc = instruction.info.b_bl_bx_blx.target_address;
2878 else
2879 current_pc = 0; /* branch destination unknown */
2881 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2882 if (trace_msg_type == 12)
2884 if (current_pc == 0)
2885 current_pc = chkpt_reg;
2886 else if (current_pc != chkpt_reg) /* sanity check */
2887 LOG_WARNING("trace is suspect: checkpoint register "
2888 "inconsistent with adddress from image");
2891 if (current_pc == 0)
2892 command_print(cmd_ctx, "address unknown");
2894 continue;
2897 /* indirect branch; the branch destination was read from trace buffer */
2898 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2900 current_pc = branch_target;
2902 /* sanity check (checkpoint reg is redundant) */
2903 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2904 LOG_WARNING("trace is suspect: checkpoint register "
2905 "inconsistent with address from trace buffer");
2908 } /* END: for (i = 0; i < trace_data->depth; i++) */
2910 breakpoint_pc = trace_data->last_instruction; /* used below */
2911 trace_data = trace_data->next;
2913 } /* END: while (trace_data) */
2915 /* Finally... display all instructions up to the value of the pc when the
2916 * debug break occurred (saved when trace data was collected from target).
2917 * This is necessary because the trace only records execution branches and 16
2918 * consecutive instructions (rollovers), so the last few instructions are typically missing.
2920 if (current_pc == 0)
2921 return ERROR_OK; /* current_pc was never found */
2923 /* how many instructions remaining? */
2924 int gap_count = (breakpoint_pc - current_pc) /
2925 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2927 /* should never be negative or over 16, but verify */
2928 if (gap_count < 0 || gap_count > 16)
2930 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2931 return ERROR_OK; /* bail; large number or negative value no good */
2934 /* display remaining instructions */
2935 for (i = 0; i < gap_count; i++)
2937 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2938 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2941 return ERROR_OK;
2944 static const struct reg_arch_type xscale_reg_type = {
2945 .get = xscale_get_reg,
2946 .set = xscale_set_reg,
2949 static void xscale_build_reg_cache(struct target *target)
2951 struct xscale_common *xscale = target_to_xscale(target);
2952 struct arm *armv4_5 = &xscale->armv4_5_common;
2953 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2954 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2955 int i;
2956 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2958 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2960 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2961 cache_p = &(*cache_p)->next;
2963 /* fill in values for the xscale reg cache */
2964 (*cache_p)->name = "XScale registers";
2965 (*cache_p)->next = NULL;
2966 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2967 (*cache_p)->num_regs = num_regs;
2969 for (i = 0; i < num_regs; i++)
2971 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2972 (*cache_p)->reg_list[i].value = calloc(4, 1);
2973 (*cache_p)->reg_list[i].dirty = 0;
2974 (*cache_p)->reg_list[i].valid = 0;
2975 (*cache_p)->reg_list[i].size = 32;
2976 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2977 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2978 arch_info[i] = xscale_reg_arch_info[i];
2979 arch_info[i].target = target;
2982 xscale->reg_cache = (*cache_p);
2985 static int xscale_init_target(struct command_context *cmd_ctx,
2986 struct target *target)
2988 xscale_build_reg_cache(target);
2989 return ERROR_OK;
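/* The reset vectors built below are ARM B instructions.  Since an ARM branch
 * is relative to PC + 8 and encodes a word offset, the immediate is
 * (handler_entry - vector_address - 8) >> 2, computed separately for the
 * low (0x0) and high (0xffff0000) vector tables. */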
2992 static int xscale_init_arch_info(struct target *target,
2993 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2995 struct arm *armv4_5;
2996 uint32_t high_reset_branch, low_reset_branch;
2997 int i;
2999 armv4_5 = &xscale->armv4_5_common;
3001 /* store architecture specific data */
3002 xscale->common_magic = XSCALE_COMMON_MAGIC;
3004 /* we don't really *need* a variant param ... */
3005 if (variant) {
3006 int ir_length = 0;
3008 if (strcmp(variant, "pxa250") == 0
3009 || strcmp(variant, "pxa255") == 0
3010 || strcmp(variant, "pxa26x") == 0)
3011 ir_length = 5;
3012 else if (strcmp(variant, "pxa27x") == 0
3013 || strcmp(variant, "ixp42x") == 0
3014 || strcmp(variant, "ixp45x") == 0
3015 || strcmp(variant, "ixp46x") == 0)
3016 ir_length = 7;
3017 else if (strcmp(variant, "pxa3xx") == 0)
3018 ir_length = 11;
3019 else
3020 LOG_WARNING("%s: unrecognized variant %s",
3021 tap->dotted_name, variant);
3023 if (ir_length && ir_length != tap->ir_length) {
3024 LOG_WARNING("%s: IR length for %s is %d; fixing",
3025 tap->dotted_name, variant, ir_length);
3026 tap->ir_length = ir_length;
3030 /* PXA3xx shifts the JTAG instructions */
3031 if (tap->ir_length == 11)
3032 xscale->xscale_variant = XSCALE_PXA3XX;
3033 else
3034 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3036 /* the debug handler isn't installed (and thus not running) at this time */
3037 xscale->handler_address = 0xfe000800;
3039 /* clear the vectors we keep locally for reference */
3040 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3041 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3043 /* no user-specified vectors have been configured yet */
3044 xscale->static_low_vectors_set = 0x0;
3045 xscale->static_high_vectors_set = 0x0;
3047 /* calculate branches to debug handler */
3048 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3049 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3051 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3052 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3054 for (i = 1; i <= 7; i++)
3056 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3057 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3060 /* 64kB aligned region used for DCache cleaning */
3061 xscale->cache_clean_address = 0xfffe0000;
3063 xscale->hold_rst = 0;
3064 xscale->external_debug_break = 0;
3066 xscale->ibcr_available = 2;
3067 xscale->ibcr0_used = 0;
3068 xscale->ibcr1_used = 0;
3070 xscale->dbr_available = 2;
3071 xscale->dbr0_used = 0;
3072 xscale->dbr1_used = 0;
3074 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
3075 target_name(target));
3077 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3078 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3080 xscale->vector_catch = 0x1;
3082 xscale->trace.capture_status = TRACE_IDLE;
3083 xscale->trace.data = NULL;
3084 xscale->trace.image = NULL;
3085 xscale->trace.buffer_enabled = 0;
3086 xscale->trace.buffer_fill = 0;
3088 /* prepare ARMv4/5 specific information */
3089 armv4_5->arch_info = xscale;
3090 armv4_5->read_core_reg = xscale_read_core_reg;
3091 armv4_5->write_core_reg = xscale_write_core_reg;
3092 armv4_5->full_context = xscale_full_context;
3094 arm_init_arch_info(target, armv4_5);
3096 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3097 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3098 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3099 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3100 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3101 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3102 xscale->armv4_5_mmu.has_tiny_pages = 1;
3103 xscale->armv4_5_mmu.mmu_enabled = 0;
3105 return ERROR_OK;
3108 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3110 struct xscale_common *xscale;
3112 if (sizeof xscale_debug_handler - 1 > 0x800) {
3113 LOG_ERROR("debug_handler.bin: larger than 2KB");
3114 return ERROR_FAIL;
3117 xscale = calloc(1, sizeof(*xscale));
3118 if (!xscale)
3119 return ERROR_FAIL;
3121 return xscale_init_arch_info(target, xscale, target->tap,
3122 target->variant);
3125 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3127 struct target *target = NULL;
3128 struct xscale_common *xscale;
3129 int retval;
3130 uint32_t handler_address;
3132 if (CMD_ARGC < 2)
3134 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3135 return ERROR_OK;
3138 if ((target = get_target(CMD_ARGV[0])) == NULL)
3140 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3141 return ERROR_FAIL;
3144 xscale = target_to_xscale(target);
3145 retval = xscale_verify_pointer(CMD_CTX, xscale);
3146 if (retval != ERROR_OK)
3147 return retval;
3149 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3151 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3152 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3154 xscale->handler_address = handler_address;
3156 else
3158 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3159 return ERROR_FAIL;
3162 return ERROR_OK;
3165 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3167 struct target *target = NULL;
3168 struct xscale_common *xscale;
3169 int retval;
3170 uint32_t cache_clean_address;
3172 if (CMD_ARGC < 2)
3174 return ERROR_COMMAND_SYNTAX_ERROR;
3177 target = get_target(CMD_ARGV[0]);
3178 if (target == NULL)
3180 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3181 return ERROR_FAIL;
3183 xscale = target_to_xscale(target);
3184 retval = xscale_verify_pointer(CMD_CTX, xscale);
3185 if (retval != ERROR_OK)
3186 return retval;
3188 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3190 if (cache_clean_address & 0xffff)
3192 LOG_ERROR("xscale cache_clean_address <address> must be 64KB aligned");
3194 else
3196 xscale->cache_clean_address = cache_clean_address;
3199 return ERROR_OK;
3202 COMMAND_HANDLER(xscale_handle_cache_info_command)
3204 struct target *target = get_current_target(CMD_CTX);
3205 struct xscale_common *xscale = target_to_xscale(target);
3206 int retval;
3208 retval = xscale_verify_pointer(CMD_CTX, xscale);
3209 if (retval != ERROR_OK)
3210 return retval;
3212 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
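/* Translate a virtual address with the shared ARMv4/5 table walker.  Errors
 * from armv4_5_mmu_translate_va are propagated to the caller; when the walker
 * reports type == -1 the lookup itself failed and the value returned in 'ret'
 * is handed back as the result code (that is how this caller treats it). */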
3215 static int xscale_virt2phys(struct target *target,
3216 uint32_t virtual, uint32_t *physical)
3218 struct xscale_common *xscale = target_to_xscale(target);
3219 int type;
3220 uint32_t cb;
3221 int domain;
3222 uint32_t ap;
3224 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3225 LOG_ERROR(xscale_not);
3226 return ERROR_TARGET_INVALID;
3229 uint32_t ret;
3230 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap, &ret);
3231 if (retval != ERROR_OK)
3232 return retval;
3233 if (type == -1)
3235 return ret;
3237 *physical = ret;
3238 return ERROR_OK;
3241 static int xscale_mmu(struct target *target, int *enabled)
3243 struct xscale_common *xscale = target_to_xscale(target);
3245 if (target->state != TARGET_HALTED)
3247 LOG_ERROR("Target not halted");
3248 return ERROR_TARGET_INVALID;
3250 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3251 return ERROR_OK;
3254 COMMAND_HANDLER(xscale_handle_mmu_command)
3256 struct target *target = get_current_target(CMD_CTX);
3257 struct xscale_common *xscale = target_to_xscale(target);
3258 int retval;
3260 retval = xscale_verify_pointer(CMD_CTX, xscale);
3261 if (retval != ERROR_OK)
3262 return retval;
3264 if (target->state != TARGET_HALTED)
3266 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3267 return ERROR_OK;
3270 if (CMD_ARGC >= 1)
3272 bool enable;
3273 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3274 if (enable)
3275 xscale_enable_mmu_caches(target, 1, 0, 0);
3276 else
3277 xscale_disable_mmu_caches(target, 1, 0, 0);
3278 xscale->armv4_5_mmu.mmu_enabled = enable;
3281 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3283 return ERROR_OK;
3286 COMMAND_HANDLER(xscale_handle_idcache_command)
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct xscale_common *xscale = target_to_xscale(target);
3291 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3292 if (retval != ERROR_OK)
3293 return retval;
3295 if (target->state != TARGET_HALTED)
3297 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3298 return ERROR_OK;
3301 bool icache = false;
3302 if (strcmp(CMD_NAME, "icache") == 0)
3303 icache = true;
3304 if (CMD_ARGC >= 1)
3306 bool enable;
3307 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3308 if (icache) {
3309 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3310 if (enable)
3311 xscale_enable_mmu_caches(target, 0, 0, 1);
3312 else
3313 xscale_disable_mmu_caches(target, 0, 0, 1);
3314 } else {
3315 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3316 if (enable)
3317 xscale_enable_mmu_caches(target, 0, 1, 0);
3318 else
3319 xscale_disable_mmu_caches(target, 0, 1, 0);
3323 bool enabled = icache ?
3324 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3325 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3326 const char *msg = enabled ? "enabled" : "disabled";
3327 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3329 return ERROR_OK;
3332 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3334 struct target *target = get_current_target(CMD_CTX);
3335 struct xscale_common *xscale = target_to_xscale(target);
3336 int retval;
3338 retval = xscale_verify_pointer(CMD_CTX, xscale);
3339 if (retval != ERROR_OK)
3340 return retval;
3342 if (CMD_ARGC < 1)
3344 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3346 else
3348 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3349 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3350 xscale_write_dcsr(target, -1, -1);
3353 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3355 return ERROR_OK;
3359 COMMAND_HANDLER(xscale_handle_vector_table_command)
3361 struct target *target = get_current_target(CMD_CTX);
3362 struct xscale_common *xscale = target_to_xscale(target);
3363 int err = 0;
3364 int retval;
3366 retval = xscale_verify_pointer(CMD_CTX, xscale);
3367 if (retval != ERROR_OK)
3368 return retval;
3370 if (CMD_ARGC == 0) /* print current settings */
3372 int idx;
3374 command_print(CMD_CTX, "active user-set static vectors:");
3375 for (idx = 1; idx < 8; idx++)
3376 if (xscale->static_low_vectors_set & (1 << idx))
3377 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3378 for (idx = 1; idx < 8; idx++)
3379 if (xscale->static_high_vectors_set & (1 << idx))
3380 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3381 return ERROR_OK;
3384 if (CMD_ARGC != 3)
3385 err = 1;
3386 else
3388 int idx;
3389 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3390 uint32_t vec;
3391 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3393 if (idx < 1 || idx >= 8)
3394 err = 1;
3396 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3398 xscale->static_low_vectors_set |= (1<<idx);
3399 xscale->static_low_vectors[idx] = vec;
3401 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3403 xscale->static_high_vectors_set |= (1<<idx);
3404 xscale->static_high_vectors[idx] = vec;
3406 else
3407 err = 1;
3410 if (err)
3411 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3413 return ERROR_OK;
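/* 'xscale trace_buffer' enables or disables tracing and selects the capture
 * mode: "fill" stops once the buffer has been filled (optionally a given
 * number of times), "wrap" keeps it running as a circular buffer.  The mode
 * is programmed into the low two bits of DCSR via xscale_write_dcsr_sw()
 * below. */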
3417 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3419 struct target *target = get_current_target(CMD_CTX);
3420 struct xscale_common *xscale = target_to_xscale(target);
3421 uint32_t dcsr_value;
3422 int retval;
3424 retval = xscale_verify_pointer(CMD_CTX, xscale);
3425 if (retval != ERROR_OK)
3426 return retval;
3428 if (target->state != TARGET_HALTED)
3430 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3431 return ERROR_OK;
3434 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3436 struct xscale_trace_data *td, *next_td;
3437 xscale->trace.buffer_enabled = 1;
3439 /* free old trace data */
3440 td = xscale->trace.data;
3441 while (td)
3443 next_td = td->next;
3445 if (td->entries)
3446 free(td->entries);
3447 free(td);
3448 td = next_td;
3450 xscale->trace.data = NULL;
3452 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3454 xscale->trace.buffer_enabled = 0;
3457 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3459 uint32_t fill = 1;
3460 if (CMD_ARGC >= 3)
3461 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3462 xscale->trace.buffer_fill = fill;
3464 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3466 xscale->trace.buffer_fill = -1;
3469 command_print(CMD_CTX, "trace buffer %s (%s)",
3470 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3471 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3473 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3474 if (xscale->trace.buffer_fill >= 0)
3475 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3476 else
3477 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3479 return ERROR_OK;
3482 COMMAND_HANDLER(xscale_handle_trace_image_command)
3484 struct target *target = get_current_target(CMD_CTX);
3485 struct xscale_common *xscale = target_to_xscale(target);
3486 int retval;
3488 if (CMD_ARGC < 1)
3490 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3491 return ERROR_OK;
3494 retval = xscale_verify_pointer(CMD_CTX, xscale);
3495 if (retval != ERROR_OK)
3496 return retval;
3498 if (xscale->trace.image)
3500 image_close(xscale->trace.image);
3501 free(xscale->trace.image);
3502 command_print(CMD_CTX, "previously loaded image found and closed");
3505 xscale->trace.image = malloc(sizeof(struct image));
3506 xscale->trace.image->base_address_set = 0;
3507 xscale->trace.image->start_address_set = 0;
3509 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3510 if (CMD_ARGC >= 2)
3512 xscale->trace.image->base_address_set = 1;
3513 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3515 else
3517 xscale->trace.image->base_address_set = 0;
3520 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3522 free(xscale->trace.image);
3523 xscale->trace.image = NULL;
3524 return ERROR_OK;
3527 return ERROR_OK;
3530 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3532 struct target *target = get_current_target(CMD_CTX);
3533 struct xscale_common *xscale = target_to_xscale(target);
3534 struct xscale_trace_data *trace_data;
3535 struct fileio file;
3536 int retval;
3538 retval = xscale_verify_pointer(CMD_CTX, xscale);
3539 if (retval != ERROR_OK)
3540 return retval;
3542 if (target->state != TARGET_HALTED)
3544 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3545 return ERROR_OK;
3548 if (CMD_ARGC < 1)
3550 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3551 return ERROR_OK;
3554 trace_data = xscale->trace.data;
3556 if (!trace_data)
3558 command_print(CMD_CTX, "no trace data collected");
3559 return ERROR_OK;
3562 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3564 return ERROR_OK;
3567 while (trace_data)
3569 int i;
3571 fileio_write_u32(&file, trace_data->chkpt0);
3572 fileio_write_u32(&file, trace_data->chkpt1);
3573 fileio_write_u32(&file, trace_data->last_instruction);
3574 fileio_write_u32(&file, trace_data->depth);
3576 for (i = 0; i < trace_data->depth; i++)
3577 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3579 trace_data = trace_data->next;
3582 fileio_close(&file);
3584 return ERROR_OK;
3587 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3589 struct target *target = get_current_target(CMD_CTX);
3590 struct xscale_common *xscale = target_to_xscale(target);
3591 int retval;
3593 retval = xscale_verify_pointer(CMD_CTX, xscale);
3594 if (retval != ERROR_OK)
3595 return retval;
3597 xscale_analyze_trace(target, CMD_CTX);
3599 return ERROR_OK;
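/* Illustrative usage (register numbers are the CP15 numbers translated in
 * the switch below): "xscale cp15 1" reads and prints the control register,
 * "xscale cp15 1 <value>" writes it.  Note that the write path below sends
 * the value to the target but does not update OpenOCD's cached copy of the
 * register. */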
3602 COMMAND_HANDLER(xscale_handle_cp15)
3604 struct target *target = get_current_target(CMD_CTX);
3605 struct xscale_common *xscale = target_to_xscale(target);
3606 int retval;
3608 retval = xscale_verify_pointer(CMD_CTX, xscale);
3609 if (retval != ERROR_OK)
3610 return retval;
3612 if (target->state != TARGET_HALTED)
3614 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3615 return ERROR_OK;
3617 uint32_t reg_no = 0;
3618 struct reg *reg = NULL;
3619 if (CMD_ARGC > 0)
3621 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3622 /* translate from XScale CP15 register number to the OpenOCD register cache index */
3623 switch (reg_no)
3625 case 0:
3626 reg_no = XSCALE_MAINID;
3627 break;
3628 case 1:
3629 reg_no = XSCALE_CTRL;
3630 break;
3631 case 2:
3632 reg_no = XSCALE_TTB;
3633 break;
3634 case 3:
3635 reg_no = XSCALE_DAC;
3636 break;
3637 case 5:
3638 reg_no = XSCALE_FSR;
3639 break;
3640 case 6:
3641 reg_no = XSCALE_FAR;
3642 break;
3643 case 13:
3644 reg_no = XSCALE_PID;
3645 break;
3646 case 15:
3647 reg_no = XSCALE_CPACCESS;
3648 break;
3649 default:
3650 command_print(CMD_CTX, "invalid register number");
3651 return ERROR_INVALID_ARGUMENTS;
3653 reg = &xscale->reg_cache->reg_list[reg_no];
3656 if (CMD_ARGC == 1)
3658 uint32_t value;
3660 /* read cp15 control register */
3661 xscale_get_reg(reg);
3662 value = buf_get_u32(reg->value, 0, 32);
3663 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3665 else if (CMD_ARGC == 2)
3667 uint32_t value;
3668 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3670 /* send CP write request (command 0x41) */
3671 xscale_send_u32(target, 0x41);
3673 /* send CP register number */
3674 xscale_send_u32(target, reg_no);
3676 /* send CP register value */
3677 xscale_send_u32(target, value);
3679 /* execute cpwait to ensure outstanding operations complete */
3680 xscale_send_u32(target, 0x53);
3682 else
3684 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3687 return ERROR_OK;
3690 static const struct command_registration xscale_exec_command_handlers[] = {
3692 .name = "cache_info",
3693 .handler = xscale_handle_cache_info_command,
3694 .mode = COMMAND_EXEC,
3695 .help = "display information about CPU caches",
3698 .name = "mmu",
3699 .handler = xscale_handle_mmu_command,
3700 .mode = COMMAND_EXEC,
3701 .help = "enable or disable the MMU",
3702 .usage = "['enable'|'disable']",
3705 .name = "icache",
3706 .handler = xscale_handle_idcache_command,
3707 .mode = COMMAND_EXEC,
3708 .help = "display ICache state, optionally enabling or "
3709 "disabling it",
3710 .usage = "['enable'|'disable']",
3713 .name = "dcache",
3714 .handler = xscale_handle_idcache_command,
3715 .mode = COMMAND_EXEC,
3716 .help = "display DCache state, optionally enabling or "
3717 "disabling it",
3718 .usage = "['enable'|'disable']",
3721 .name = "vector_catch",
3722 .handler = xscale_handle_vector_catch_command,
3723 .mode = COMMAND_EXEC,
3724 .help = "set or display 8-bit mask of vectors "
3725 "that should trigger debug entry",
3726 .usage = "[mask]",
3729 .name = "vector_table",
3730 .handler = xscale_handle_vector_table_command,
3731 .mode = COMMAND_EXEC,
3732 .help = "set vector table entry in mini-ICache, "
3733 "or display current tables",
3734 .usage = "[('high'|'low') index code]",
3737 .name = "trace_buffer",
3738 .handler = xscale_handle_trace_buffer_command,
3739 .mode = COMMAND_EXEC,
3740 .help = "display trace buffer status, enable or disable "
3741 "tracing, and optionally reconfigure trace mode",
3742 .usage = "['enable'|'disable' ['fill' number|'wrap']]",
3745 .name = "dump_trace",
3746 .handler = xscale_handle_dump_trace_command,
3747 .mode = COMMAND_EXEC,
3748 .help = "dump content of trace buffer to file",
3749 .usage = "filename",
3752 .name = "analyze_trace",
3753 .handler = xscale_handle_analyze_trace_buffer_command,
3754 .mode = COMMAND_EXEC,
3755 .help = "analyze content of trace buffer",
3756 .usage = "",
3759 .name = "trace_image",
3760 .handler = xscale_handle_trace_image_command,
3761 .mode = COMMAND_EXEC,
3762 .help = "load image from file to address (default 0)",
3763 .usage = "filename [offset [filetype]]",
3766 .name = "cp15",
3767 .handler = xscale_handle_cp15,
3768 .mode = COMMAND_EXEC,
3769 .help = "Read or write coprocessor 15 register.",
3770 .usage = "register [value]",
3772 COMMAND_REGISTRATION_DONE
3774 static const struct command_registration xscale_any_command_handlers[] = {
3776 .name = "debug_handler",
3777 .handler = xscale_handle_debug_handler_command,
3778 .mode = COMMAND_ANY,
3779 .help = "Change address used for debug handler.",
3780 .usage = "target address",
3783 .name = "cache_clean_address",
3784 .handler = xscale_handle_cache_clean_address_command,
3785 .mode = COMMAND_ANY,
3786 .help = "Change address used for cleaning data cache.",
3787 .usage = "address",
3790 .chain = xscale_exec_command_handlers,
3792 COMMAND_REGISTRATION_DONE
3794 static const struct command_registration xscale_command_handlers[] = {
3796 .chain = arm_command_handlers,
3799 .name = "xscale",
3800 .mode = COMMAND_ANY,
3801 .help = "xscale command group",
3802 .chain = xscale_any_command_handlers,
3804 COMMAND_REGISTRATION_DONE
3807 struct target_type xscale_target =
3809 .name = "xscale",
3811 .poll = xscale_poll,
3812 .arch_state = xscale_arch_state,
3814 .target_request_data = NULL,
3816 .halt = xscale_halt,
3817 .resume = xscale_resume,
3818 .step = xscale_step,
3820 .assert_reset = xscale_assert_reset,
3821 .deassert_reset = xscale_deassert_reset,
3822 .soft_reset_halt = NULL,
3824 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3825 .get_gdb_reg_list = arm_get_gdb_reg_list,
3827 .read_memory = xscale_read_memory,
3828 .read_phys_memory = xscale_read_phys_memory,
3829 .write_memory = xscale_write_memory,
3830 .write_phys_memory = xscale_write_phys_memory,
3831 .bulk_write_memory = xscale_bulk_write_memory,
3833 .checksum_memory = arm_checksum_memory,
3834 .blank_check_memory = arm_blank_check_memory,
3836 .run_algorithm = armv4_5_run_algorithm,
3838 .add_breakpoint = xscale_add_breakpoint,
3839 .remove_breakpoint = xscale_remove_breakpoint,
3840 .add_watchpoint = xscale_add_watchpoint,
3841 .remove_watchpoint = xscale_remove_watchpoint,
3843 .commands = xscale_command_handlers,
3844 .target_create = xscale_target_create,
3845 .init_target = xscale_init_target,
3847 .virt2phys = xscale_virt2phys,
3848 .mmu = xscale_mmu