[openocd/ztw.git] / src / target / xscale.c
blob 0b5b26b3c795f87819a660eeeba884542bcd417e
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm7_9_common.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include "time_support.h"
36 #include "image.h"
40 * Important XScale documents available as of October 2009 include:
42 * Intel XScale® Core Developer’s Manual, January 2004
43 * Order Number: 273473-002
44 * This has a chapter detailing debug facilities, and punts some
45 * details to chip-specific microarchitecture documents.
47 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
48 * Document Number: 273539-005
49 * Less detailed than the developer's manual, but summarizes those
50 * missing details (for most XScales) and gives LOTS of notes about
51 * debugger/handler interaction issues. Presents a simpler reset
52 * and load-handler sequence than the arch doc. (Note, OpenOCD
53 * doesn't currently support "Hot-Debug" as defined there.)
55 * Chip-specific microarchitecture documents may also be useful.
59 /* forward declarations */
60 static int xscale_resume(struct target *, int current,
61 uint32_t address, int handle_breakpoints, int debug_execution);
62 static int xscale_debug_entry(struct target *);
63 static int xscale_restore_context(struct target *);
64 static int xscale_get_reg(struct reg *reg);
65 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
66 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
67 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
68 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_read_trace(struct target *);
72 /* This XScale "debug handler" is loaded into the processor's
73 * mini-ICache, which is 2K of code writable only via JTAG.
75 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
76 * binary files cleanly. It's string oriented, and terminates them
77 * with a NUL character. Better would be to generate the constants
78 * and let other code decide names, scoping, and other housekeeping.
80 static /* unsigned const char xscale_debug_handler[] = ... */
81 #include "xscale_debug.h"
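/* (Note: the generated header above is expected to expand to the byte array named
 * in the commented-out declaration, which is why the "static" qualifier precedes
 * the #include.) */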
83 static char *const xscale_reg_list[] =
85 "XSCALE_MAINID", /* 0 */
86 "XSCALE_CACHETYPE",
87 "XSCALE_CTRL",
88 "XSCALE_AUXCTRL",
89 "XSCALE_TTB",
90 "XSCALE_DAC",
91 "XSCALE_FSR",
92 "XSCALE_FAR",
93 "XSCALE_PID",
94 "XSCALE_CPACCESS",
95 "XSCALE_IBCR0", /* 10 */
96 "XSCALE_IBCR1",
97 "XSCALE_DBR0",
98 "XSCALE_DBR1",
99 "XSCALE_DBCON",
100 "XSCALE_TBREG",
101 "XSCALE_CHKPT0",
102 "XSCALE_CHKPT1",
103 "XSCALE_DCSR",
104 "XSCALE_TX",
105 "XSCALE_RX", /* 20 */
106 "XSCALE_TXRXCTRL",
109 static const struct xscale_reg xscale_reg_arch_info[] =
111 {XSCALE_MAINID, NULL},
112 {XSCALE_CACHETYPE, NULL},
113 {XSCALE_CTRL, NULL},
114 {XSCALE_AUXCTRL, NULL},
115 {XSCALE_TTB, NULL},
116 {XSCALE_DAC, NULL},
117 {XSCALE_FSR, NULL},
118 {XSCALE_FAR, NULL},
119 {XSCALE_PID, NULL},
120 {XSCALE_CPACCESS, NULL},
121 {XSCALE_IBCR0, NULL},
122 {XSCALE_IBCR1, NULL},
123 {XSCALE_DBR0, NULL},
124 {XSCALE_DBR1, NULL},
125 {XSCALE_DBCON, NULL},
126 {XSCALE_TBREG, NULL},
127 {XSCALE_CHKPT0, NULL},
128 {XSCALE_CHKPT1, NULL},
129 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
130 {-1, NULL}, /* TX accessed via JTAG */
131 {-1, NULL}, /* RX accessed via JTAG */
132 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 static int xscale_reg_arch_type = -1;
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 uint8_t buf[4];
142 buf_set_u32(buf, 0, 32, value);
144 return xscale_set_reg(reg, buf);
147 static const char xscale_not[] = "target is not an XScale";
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
156 return ERROR_OK;
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
161 if (tap == NULL)
162 return ERROR_FAIL;
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 struct scan_field field;
167 uint8_t scratch[4];
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 return ERROR_OK;
181 static int xscale_read_dcsr(struct target *target)
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
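/* The SELDCSR data register is scanned as three fields: a 3-bit control word
 * (bit 1 = hold_rst, bit 2 = external debug break, set up below), the 32-bit
 * DCSR value, and one trailing bit. */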
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199 memset(&fields, 0, sizeof fields);
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
231 /* write the register with the value we just read
232 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
238 jtag_set_end_state(TAP_IDLE);
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
248 static void xscale_getbuf(jtag_callback_data_t arg)
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
276 memset(&fields, 0, sizeof fields);
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
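/* Each DBGTX scan returns a 3-bit status field plus a 32-bit data word; bit 0 of
 * the status flags whether the debug handler had actually placed a new word in TX,
 * so words read back without that bit set are discarded and retried below. */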
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
303 fields[0].in_value = &field0[i];
305 jtag_add_pathmove(3, path);
307 fields[1].in_value = (uint8_t *)(field1 + i);
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
313 words_scheduled++;
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
325 if (!(field0[i] & 1))
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
334 words_scheduled--;
337 if (words_scheduled == 0)
339 if (attempts++ == 1000)
341 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
347 words_done += words_scheduled;
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352 free(field0);
353 free(field1);
355 return retval;
358 static int xscale_read_tx(struct target *target, int consume)
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
372 jtag_set_end_state(TAP_IDLE);
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
387 memset(&fields, 0, sizeof fields);
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
405 for (;;)
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR;
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
415 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
435 if (!((!(field0_in & 1)) && consume))
437 goto done;
439 if (debug_level >= 3)
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
445 keep_alive();
448 done:
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
453 return ERROR_OK;
456 static int xscale_write_rx(struct target *target)
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
470 jtag_set_end_state(TAP_IDLE);
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
474 memset(&fields, 0, sizeof fields);
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
523 keep_alive();
526 done:
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
538 return ERROR_OK;
541 /* send count elements of 'size' bytes each to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
549 jtag_set_end_state(TAP_IDLE);
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
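/* Field layout mirrors xscale_write_rx(): 3 status bits (driven as zero), the
 * 32-bit data word, and a final bit that marks the RX word as valid. */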
558 int endianness = target->endianness;
559 while (done_count++ < count)
561 switch (size)
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
566 t[1]=le_to_h_u32(buffer);
567 } else
569 t[1]=be_to_h_u32(buffer);
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
575 t[1]=le_to_h_u16(buffer);
576 } else
578 t[1]=be_to_h_u16(buffer);
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 exit(-1);
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
602 return ERROR_OK;
605 static int xscale_send_u32(struct target *target, uint32_t value)
607 struct xscale_common *xscale = target_to_xscale(target);
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
637 memset(&fields, 0, sizeof fields);
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
669 return ERROR_OK;
672 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
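/* The XOR folds preserve parity while reducing the word to 4 bits; 0x6996 then
 * serves as a 16-entry, one-bit-per-entry lookup table of the parity of each
 * possible 4-bit value. */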
673 static unsigned int parity (unsigned int v)
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
701 buf_set_u32(&cmd, 0, 6, 0x3);
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
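/* va >> 5 drops the low 5 address bits: a cache line holds 8 words (32 bytes),
 * so only the 27-bit line address is sent. */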
706 memset(&fields, 0, sizeof fields);
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
725 for (word = 0; word < 8; word++)
727 buf_set_u32(packet, 0, 32, buffer[word]);
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
736 return jtag_execute_queue();
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
754 memset(&fields, 0, sizeof fields);
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
766 return ERROR_OK;
769 static int xscale_update_vectors(struct target *target)
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
775 uint32_t low_reset_branch, high_reset_branch;
777 for (i = 1; i < 8; i++)
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
784 else
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
797 for (i = 1; i < 8; i++)
799 if (xscale->static_low_vectors_set & (1 << i))
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
803 else
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
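/* These are ARM B-instruction offsets: (branch target - vector address - 8) >> 2,
 * where the -8 accounts for the ARM pipeline's PC offset. The branch target is an
 * entry point 0x20 bytes into the resident debug handler. */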
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
830 return ERROR_OK;
833 static int xscale_arch_state(struct target *target)
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
838 static const char *state[] =
840 "disabled", "enabled"
843 static const char *arch_dbg_reason[] =
845 "", "\n(processor reset)", "\n(trace buffer full)"
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 exit(-1);
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
868 return ERROR_OK;
871 static int xscale_poll(struct target *target)
873 int retval = ERROR_OK;
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
891 target->state = TARGET_HALTED;
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
908 return retval;
911 static int xscale_debug_entry(struct target *target)
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
951 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
952 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
953 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
954 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
956 armv4_5->core_mode = buffer[9] & 0x1f;
957 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
959 target->state = TARGET_UNKNOWN;
960 LOG_ERROR("cpsr contains invalid mode value - communication failure");
961 return ERROR_TARGET_FAILURE;
963 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
965 if (buffer[9] & 0x20)
966 armv4_5->core_state = ARMV4_5_STATE_THUMB;
967 else
968 armv4_5->core_state = ARMV4_5_STATE_ARM;
971 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
972 return ERROR_FAIL;
974 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
975 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
977 xscale_receive(target, buffer, 8);
978 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
979 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
980 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
982 else
984 /* r8 to r14, but no spsr */
985 xscale_receive(target, buffer, 7);
988 /* move data from buffer to register cache */
989 for (i = 8; i <= 14; i++)
991 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
992 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
993 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
996 /* examine debug reason */
997 xscale_read_dcsr(target);
998 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
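/* DCSR bits [4:2] hold the debug "method of entry" code decoded by the switch below. */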
1000 /* stored PC (for calculating fixup) */
1001 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1003 switch (moe)
1005 case 0x0: /* Processor reset */
1006 target->debug_reason = DBG_REASON_DBGRQ;
1007 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1008 pc -= 4;
1009 break;
1010 case 0x1: /* Instruction breakpoint hit */
1011 target->debug_reason = DBG_REASON_BREAKPOINT;
1012 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1013 pc -= 4;
1014 break;
1015 case 0x2: /* Data breakpoint hit */
1016 target->debug_reason = DBG_REASON_WATCHPOINT;
1017 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1018 pc -= 4;
1019 break;
1020 case 0x3: /* BKPT instruction executed */
1021 target->debug_reason = DBG_REASON_BREAKPOINT;
1022 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1023 pc -= 4;
1024 break;
1025 case 0x4: /* Ext. debug event */
1026 target->debug_reason = DBG_REASON_DBGRQ;
1027 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1028 pc -= 4;
1029 break;
1030 case 0x5: /* Vector trap occurred */
1031 target->debug_reason = DBG_REASON_BREAKPOINT;
1032 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1033 pc -= 4;
1034 break;
1035 case 0x6: /* Trace buffer full break */
1036 target->debug_reason = DBG_REASON_DBGRQ;
1037 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1038 pc -= 4;
1039 break;
1040 case 0x7: /* Reserved (may flag Hot-Debug support) */
1041 default:
1042 LOG_ERROR("Method of Entry is 'Reserved'");
1043 exit(-1);
1044 break;
1047 /* apply PC fixup */
1048 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1050 /* on the first debug entry, identify cache type */
1051 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1053 uint32_t cache_type_reg;
1055 /* read cp15 cache type register */
1056 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1057 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1059 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1062 /* examine MMU and Cache settings */
1063 /* read cp15 control register */
1064 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1065 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1066 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1067 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1068 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1070 /* tracing enabled, read collected trace data */
1071 if (xscale->trace.buffer_enabled)
1073 xscale_read_trace(target);
1074 xscale->trace.buffer_fill--;
1076 /* resume if we're still collecting trace data */
1077 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1078 && (xscale->trace.buffer_fill > 0))
1080 xscale_resume(target, 1, 0x0, 1, 0);
1082 else
1084 xscale->trace.buffer_enabled = 0;
1088 return ERROR_OK;
1091 static int xscale_halt(struct target *target)
1093 struct xscale_common *xscale = target_to_xscale(target);
1095 LOG_DEBUG("target->state: %s",
1096 target_state_name(target));
1098 if (target->state == TARGET_HALTED)
1100 LOG_DEBUG("target was already halted");
1101 return ERROR_OK;
1103 else if (target->state == TARGET_UNKNOWN)
1105 /* this must not happen for an XScale target */
1106 LOG_ERROR("target was in unknown state when halt was requested");
1107 return ERROR_TARGET_INVALID;
1109 else if (target->state == TARGET_RESET)
1111 LOG_DEBUG("target->state == TARGET_RESET");
1113 else
1115 /* assert external dbg break */
1116 xscale->external_debug_break = 1;
1117 xscale_read_dcsr(target);
1119 target->debug_reason = DBG_REASON_DBGRQ;
1122 return ERROR_OK;
1125 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1127 struct xscale_common *xscale = target_to_xscale(target);
1128 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1129 int retval;
1131 if (xscale->ibcr0_used)
1133 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1135 if (ibcr0_bp)
1137 xscale_unset_breakpoint(target, ibcr0_bp);
1139 else
1141 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1142 exit(-1);
1146 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1147 return retval;
1149 return ERROR_OK;
1152 static int xscale_disable_single_step(struct target *target)
1154 struct xscale_common *xscale = target_to_xscale(target);
1155 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1156 int retval;
1158 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1159 return retval;
1161 return ERROR_OK;
1164 static void xscale_enable_watchpoints(struct target *target)
1166 struct watchpoint *watchpoint = target->watchpoints;
1168 while (watchpoint)
1170 if (watchpoint->set == 0)
1171 xscale_set_watchpoint(target, watchpoint);
1172 watchpoint = watchpoint->next;
1176 static void xscale_enable_breakpoints(struct target *target)
1178 struct breakpoint *breakpoint = target->breakpoints;
1180 /* set any pending breakpoints */
1181 while (breakpoint)
1183 if (breakpoint->set == 0)
1184 xscale_set_breakpoint(target, breakpoint);
1185 breakpoint = breakpoint->next;
1189 static int xscale_resume(struct target *target, int current,
1190 uint32_t address, int handle_breakpoints, int debug_execution)
1192 struct xscale_common *xscale = target_to_xscale(target);
1193 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1194 struct breakpoint *breakpoint = target->breakpoints;
1195 uint32_t current_pc;
1196 int retval;
1197 int i;
1199 LOG_DEBUG("-");
1201 if (target->state != TARGET_HALTED)
1203 LOG_WARNING("target not halted");
1204 return ERROR_TARGET_NOT_HALTED;
1207 if (!debug_execution)
1209 target_free_all_working_areas(target);
1212 /* update vector tables */
1213 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1214 return retval;
1216 /* current = 1: continue on current pc, otherwise continue at <address> */
1217 if (!current)
1218 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1220 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1222 /* if we're at the reset vector, we have to simulate the branch */
1223 if (current_pc == 0x0)
1225 arm_simulate_step(target, NULL);
1226 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1229 /* the front-end may request us not to handle breakpoints */
1230 if (handle_breakpoints)
1232 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1234 uint32_t next_pc;
1236 /* there's a breakpoint at the current PC, we have to step over it */
1237 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1238 xscale_unset_breakpoint(target, breakpoint);
1240 /* calculate PC of next instruction */
1241 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1243 uint32_t current_opcode;
1244 target_read_u32(target, current_pc, &current_opcode);
1245 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1248 LOG_DEBUG("enable single-step");
1249 xscale_enable_single_step(target, next_pc);
1251 /* restore banked registers */
1252 xscale_restore_context(target);
1254 /* send resume request (command 0x30 or 0x31)
1255 * clean the trace buffer if it is to be enabled (0x62) */
1256 if (xscale->trace.buffer_enabled)
1258 xscale_send_u32(target, 0x62);
1259 xscale_send_u32(target, 0x31);
1261 else
1262 xscale_send_u32(target, 0x30);
1264 /* send CPSR */
1265 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1266 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1268 for (i = 7; i >= 0; i--)
1270 /* send register */
1271 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1272 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1275 /* send PC */
1276 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1277 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1279 /* wait for and process debug entry */
1280 xscale_debug_entry(target);
1282 LOG_DEBUG("disable single-step");
1283 xscale_disable_single_step(target);
1285 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1286 xscale_set_breakpoint(target, breakpoint);
1290 /* enable any pending breakpoints and watchpoints */
1291 xscale_enable_breakpoints(target);
1292 xscale_enable_watchpoints(target);
1294 /* restore banked registers */
1295 xscale_restore_context(target);
1297 /* send resume request (command 0x30 or 0x31)
1298 * clean the trace buffer if it is to be enabled (0x62) */
1299 if (xscale->trace.buffer_enabled)
1301 xscale_send_u32(target, 0x62);
1302 xscale_send_u32(target, 0x31);
1304 else
1305 xscale_send_u32(target, 0x30);
1307 /* send CPSR */
1308 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1309 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1311 for (i = 7; i >= 0; i--)
1313 /* send register */
1314 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1315 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1318 /* send PC */
1319 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1320 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1322 target->debug_reason = DBG_REASON_NOTHALTED;
1324 if (!debug_execution)
1326 /* registers are now invalid */
1327 armv4_5_invalidate_core_regs(target);
1328 target->state = TARGET_RUNNING;
1329 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1331 else
1333 target->state = TARGET_DEBUG_RUNNING;
1334 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1337 LOG_DEBUG("target resumed");
1339 return ERROR_OK;
1342 static int xscale_step_inner(struct target *target, int current,
1343 uint32_t address, int handle_breakpoints)
1345 struct xscale_common *xscale = target_to_xscale(target);
1346 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1347 uint32_t next_pc;
1348 int retval;
1349 int i;
1351 target->debug_reason = DBG_REASON_SINGLESTEP;
1353 /* calculate PC of next instruction */
1354 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1356 uint32_t current_opcode, current_pc;
1357 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1359 target_read_u32(target, current_pc, &current_opcode);
1360 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1361 return retval;
1364 LOG_DEBUG("enable single-step");
1365 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1366 return retval;
1368 /* restore banked registers */
1369 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1370 return retval;
1372 /* send resume request (command 0x30 or 0x31)
1373 * clean the trace buffer if it is to be enabled (0x62) */
1374 if (xscale->trace.buffer_enabled)
1376 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1377 return retval;
1378 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1379 return retval;
1381 else
1382 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1383 return retval;
1385 /* send CPSR */
1386 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1387 return retval;
1388 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1390 for (i = 7; i >= 0; i--)
1392 /* send register */
1393 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1398 /* send PC */
1399 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1400 return retval;
1401 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1403 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1405 /* registers are now invalid */
1406 if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
1407 return retval;
1409 /* wait for and process debug entry */
1410 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1411 return retval;
1413 LOG_DEBUG("disable single-step");
1414 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1415 return retval;
1417 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1419 return ERROR_OK;
1422 static int xscale_step(struct target *target, int current,
1423 uint32_t address, int handle_breakpoints)
1425 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1426 struct breakpoint *breakpoint = target->breakpoints;
1428 uint32_t current_pc;
1429 int retval;
1431 if (target->state != TARGET_HALTED)
1433 LOG_WARNING("target not halted");
1434 return ERROR_TARGET_NOT_HALTED;
1437 /* current = 1: continue on current pc, otherwise continue at <address> */
1438 if (!current)
1439 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1441 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1443 /* if we're at the reset vector, we have to simulate the step */
1444 if (current_pc == 0x0)
1446 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1447 return retval;
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1450 target->debug_reason = DBG_REASON_SINGLESTEP;
1451 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1453 return ERROR_OK;
1456 /* the front-end may request us not to handle breakpoints */
1457 if (handle_breakpoints)
1458 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1460 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1461 return retval;
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1466 if (breakpoint)
1468 xscale_set_breakpoint(target, breakpoint);
1471 LOG_DEBUG("target stepped");
1473 return ERROR_OK;
1477 static int xscale_assert_reset(struct target *target)
1479 struct xscale_common *xscale = target_to_xscale(target);
1481 LOG_DEBUG("target->state: %s",
1482 target_state_name(target));
1484 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1485 * end up in T-L-R, which would reset JTAG
1487 jtag_set_end_state(TAP_IDLE);
1488 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1490 /* set Hold reset, Halt mode and Trap Reset */
1491 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1492 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1493 xscale_write_dcsr(target, 1, 0);
1495 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1496 xscale_jtag_set_instr(target->tap, 0x7f);
1497 jtag_execute_queue();
1499 /* assert reset */
1500 jtag_add_reset(0, 1);
1502 /* sleep 1ms, to be sure we fulfill any requirements */
1503 jtag_add_sleep(1000);
1504 jtag_execute_queue();
1506 target->state = TARGET_RESET;
1508 if (target->reset_halt)
1510 int retval;
1511 if ((retval = target_halt(target)) != ERROR_OK)
1512 return retval;
1515 return ERROR_OK;
1518 static int xscale_deassert_reset(struct target *target)
1520 struct xscale_common *xscale = target_to_xscale(target);
1521 struct breakpoint *breakpoint = target->breakpoints;
1523 LOG_DEBUG("-");
1525 xscale->ibcr_available = 2;
1526 xscale->ibcr0_used = 0;
1527 xscale->ibcr1_used = 0;
1529 xscale->dbr_available = 2;
1530 xscale->dbr0_used = 0;
1531 xscale->dbr1_used = 0;
1533 /* mark all hardware breakpoints as unset */
1534 while (breakpoint)
1536 if (breakpoint->type == BKPT_HARD)
1538 breakpoint->set = 0;
1540 breakpoint = breakpoint->next;
1543 armv4_5_invalidate_core_regs(target);
1545 /* FIXME: mark hardware watchpoints as unset too. Also,
1546 * at least some of the XScale registers are invalid...
1550 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1551 * contents got invalidated. Safer to force that, so writing new
1552 * contents can't ever fail.
1555 uint32_t address;
1556 unsigned buf_cnt;
1557 const uint8_t *buffer = xscale_debug_handler;
1558 int retval;
1560 /* release SRST */
1561 jtag_add_reset(0, 0);
1563 /* wait 300ms; 150 and 100ms were not enough */
1564 jtag_add_sleep(300*1000);
1566 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1567 jtag_execute_queue();
1569 /* set Hold reset, Halt mode and Trap Reset */
1570 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1571 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1572 xscale_write_dcsr(target, 1, 0);
1574 /* Load the debug handler into the mini-icache. Since
1575 * it's using halt mode (not monitor mode), it runs in
1576 * "Special Debug State" for access to registers, memory,
1577 * coprocessors, trace data, etc.
1579 address = xscale->handler_address;
1580 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1581 binary_size > 0;
1582 binary_size -= buf_cnt, buffer += buf_cnt)
1584 uint32_t cache_line[8];
1585 unsigned i;
1587 buf_cnt = binary_size;
1588 if (buf_cnt > 32)
1589 buf_cnt = 32;
1591 for (i = 0; i < buf_cnt; i += 4)
1593 /* convert LE buffer to host-endian uint32_t */
1594 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1597 for (; i < 32; i += 4)
1599 cache_line[i / 4] = 0xe1a08008;
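/* 0xe1a08008 is "mov r8, r8", a harmless filler instruction used to pad out
 * a partial cache line. */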
1602 /* only load addresses other than the reset vectors */
1603 if ((address % 0x400) != 0x0)
1605 retval = xscale_load_ic(target, address,
1606 cache_line);
1607 if (retval != ERROR_OK)
1608 return retval;
1611 address += buf_cnt;
1614 retval = xscale_load_ic(target, 0x0,
1615 xscale->low_vectors);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 retval = xscale_load_ic(target, 0xffff0000,
1619 xscale->high_vectors);
1620 if (retval != ERROR_OK)
1621 return retval;
1623 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1625 jtag_add_sleep(100000);
1627 /* set Hold reset, Halt mode and Trap Reset */
1628 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1629 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1630 xscale_write_dcsr(target, 1, 0);
1632 /* clear Hold reset to let the target run (should enter debug handler) */
1633 xscale_write_dcsr(target, 0, 1);
1634 target->state = TARGET_RUNNING;
1636 if (!target->reset_halt)
1638 jtag_add_sleep(10000);
1640 /* we should have entered debug now */
1641 xscale_debug_entry(target);
1642 target->state = TARGET_HALTED;
1644 /* resume the target */
1645 xscale_resume(target, 1, 0x0, 1, 0);
1649 return ERROR_OK;
1652 static int xscale_read_core_reg(struct target *target, int num,
1653 enum armv4_5_mode mode)
1655 LOG_ERROR("not implemented");
1656 return ERROR_OK;
1659 static int xscale_write_core_reg(struct target *target, int num,
1660 enum armv4_5_mode mode, uint32_t value)
1662 LOG_ERROR("not implemented");
1663 return ERROR_OK;
1666 static int xscale_full_context(struct target *target)
1668 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1670 uint32_t *buffer;
1672 int i, j;
1674 LOG_DEBUG("-");
1676 if (target->state != TARGET_HALTED)
1678 LOG_WARNING("target not halted");
1679 return ERROR_TARGET_NOT_HALTED;
1682 buffer = malloc(4 * 8);
1684 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1685 * we can't enter User mode on an XScale (unpredictable),
1686 * but User shares registers with SYS
1688 for (i = 1; i < 7; i++)
1690 int valid = 1;
1692 /* check if there are invalid registers in the current mode
1694 for (j = 0; j <= 16; j++)
1696 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1697 valid = 0;
1700 if (!valid)
1702 uint32_t tmp_cpsr;
1704 /* request banked registers */
1705 xscale_send_u32(target, 0x0);
1707 tmp_cpsr = 0x0;
1708 tmp_cpsr |= armv4_5_number_to_mode(i);
1709 tmp_cpsr |= 0xc0; /* I/F bits */
1711 /* send CPSR for desired mode */
1712 xscale_send_u32(target, tmp_cpsr);
1714 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1715 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1717 xscale_receive(target, buffer, 8);
1718 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1719 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1720 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1722 else
1724 xscale_receive(target, buffer, 7);
1727 /* move data from buffer to register cache */
1728 for (j = 8; j <= 14; j++)
1730 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1731 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1732 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1737 free(buffer);
1739 return ERROR_OK;
1742 static int xscale_restore_context(struct target *target)
1744 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1746 int i, j;
1748 if (target->state != TARGET_HALTED)
1750 LOG_WARNING("target not halted");
1751 return ERROR_TARGET_NOT_HALTED;
1754 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1755 * we can't enter User mode on an XScale (unpredictable),
1756 * but User shares registers with SYS
1758 for (i = 1; i < 7; i++)
1760 int dirty = 0;
1762 /* check if there are dirty registers in the current mode
1764 for (j = 8; j <= 14; j++)
1766 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1767 dirty = 1;
1770 /* if not USR/SYS, check if the SPSR needs to be written */
1771 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1773 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1774 dirty = 1;
1777 if (dirty)
1779 uint32_t tmp_cpsr;
1781 /* send banked registers */
1782 xscale_send_u32(target, 0x1);
1784 tmp_cpsr = 0x0;
1785 tmp_cpsr |= armv4_5_number_to_mode(i);
1786 tmp_cpsr |= 0xc0; /* I/F bits */
1788 /* send CPSR for desired mode */
1789 xscale_send_u32(target, tmp_cpsr);
1791 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1792 for (j = 8; j <= 14; j++)
1794 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1795 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1798 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1800 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1801 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1806 return ERROR_OK;
1809 static int xscale_read_memory(struct target *target, uint32_t address,
1810 uint32_t size, uint32_t count, uint8_t *buffer)
1812 struct xscale_common *xscale = target_to_xscale(target);
1813 uint32_t *buf32;
1814 uint32_t i;
1815 int retval;
1817 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1819 if (target->state != TARGET_HALTED)
1821 LOG_WARNING("target not halted");
1822 return ERROR_TARGET_NOT_HALTED;
1825 /* sanitize arguments */
1826 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1827 return ERROR_INVALID_ARGUMENTS;
1829 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1830 return ERROR_TARGET_UNALIGNED_ACCESS;
1832 /* send memory read request (command 0x1n, n: access size) */
1833 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1834 return retval;
1836 /* send base address for read request */
1837 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1838 return retval;
1840 /* send number of requested data words */
1841 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1842 return retval;
1844 /* receive data from target (count times 32-bit words in host endianness) */
1845 buf32 = malloc(4 * count);
1846 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1847 return retval;
1849 /* extract data from host-endian buffer into byte stream */
1850 for (i = 0; i < count; i++)
1852 switch (size)
1854 case 4:
1855 target_buffer_set_u32(target, buffer, buf32[i]);
1856 buffer += 4;
1857 break;
1858 case 2:
1859 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1860 buffer += 2;
1861 break;
1862 case 1:
1863 *buffer++ = buf32[i] & 0xff;
1864 break;
1865 default:
1866 LOG_ERROR("should never get here");
1867 exit(-1);
1871 free(buf32);
1873 /* examine DCSR, to see if Sticky Abort (SA) got set */
1874 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1875 return retval;
1876 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1878 /* clear SA bit */
1879 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1880 return retval;
1882 return ERROR_TARGET_DATA_ABORT;
1885 return ERROR_OK;
1888 static int xscale_write_memory(struct target *target, uint32_t address,
1889 uint32_t size, uint32_t count, uint8_t *buffer)
1891 struct xscale_common *xscale = target_to_xscale(target);
1892 int retval;
1894 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1896 if (target->state != TARGET_HALTED)
1898 LOG_WARNING("target not halted");
1899 return ERROR_TARGET_NOT_HALTED;
1902 /* sanitize arguments */
1903 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1904 return ERROR_INVALID_ARGUMENTS;
1906 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1907 return ERROR_TARGET_UNALIGNED_ACCESS;
1909 /* send memory write request (command 0x2n, n: access size) */
1910 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1911 return retval;
1913 /* send base address for write request */
1914 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1915 return retval;
1917 /* send number of requested data words to be written */
1918 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1919 return retval;
1921 /* extract data from host-endian buffer into byte stream */
1922 #if 0
1923 for (i = 0; i < count; i++)
1925 switch (size)
1927 case 4:
1928 value = target_buffer_get_u32(target, buffer);
1929 xscale_send_u32(target, value);
1930 buffer += 4;
1931 break;
1932 case 2:
1933 value = target_buffer_get_u16(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 2;
1936 break;
1937 case 1:
1938 value = *buffer;
1939 xscale_send_u32(target, value);
1940 buffer += 1;
1941 break;
1942 default:
1943 LOG_ERROR("should never get here");
1944 exit(-1);
1947 #endif
1948 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1949 return retval;
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1953 return retval;
1954 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1956 /* clear SA bit */
1957 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1958 return retval;
1960 return ERROR_TARGET_DATA_ABORT;
1963 return ERROR_OK;
1966 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1967 uint32_t count, uint8_t *buffer)
1969 return xscale_write_memory(target, address, 4, count, buffer);
1972 static uint32_t xscale_get_ttb(struct target *target)
1974 struct xscale_common *xscale = target_to_xscale(target);
1975 uint32_t ttb;
1977 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1978 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1980 return ttb;
1983 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1984 int d_u_cache, int i_cache)
1986 struct xscale_common *xscale = target_to_xscale(target);
1987 uint32_t cp15_control;
1989 /* read cp15 control register */
1990 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1991 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1993 if (mmu)
1994 cp15_control &= ~0x1U;
1996 if (d_u_cache)
1998 /* clean DCache */
1999 xscale_send_u32(target, 0x50);
2000 xscale_send_u32(target, xscale->cache_clean_address);
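/* the clean routine in the debug handler presumably walks the 64kB region at cache_clean_address to force dirty lines out before the invalidate below */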
2002 /* invalidate DCache */
2003 xscale_send_u32(target, 0x51);
2005 cp15_control &= ~0x4U;
2008 if (i_cache)
2010 /* invalidate ICache */
2011 xscale_send_u32(target, 0x52);
2012 cp15_control &= ~0x1000U;
2015 /* write new cp15 control register */
2016 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2018 /* execute cpwait to ensure outstanding operations complete */
2019 xscale_send_u32(target, 0x53);
2022 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2023 int d_u_cache, int i_cache)
2025 struct xscale_common *xscale = target_to_xscale(target);
2026 uint32_t cp15_control;
2028 /* read cp15 control register */
2029 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2030 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2032 if (mmu)
2033 cp15_control |= 0x1U;
2035 if (d_u_cache)
2036 cp15_control |= 0x4U;
2038 if (i_cache)
2039 cp15_control |= 0x1000U;
2041 /* write new cp15 control register */
2042 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2044 /* execute cpwait to ensure outstanding operations complete */
2045 xscale_send_u32(target, 0x53);
2048 static int xscale_set_breakpoint(struct target *target,
2049 struct breakpoint *breakpoint)
2051 int retval;
2052 struct xscale_common *xscale = target_to_xscale(target);
2054 if (target->state != TARGET_HALTED)
2056 LOG_WARNING("target not halted");
2057 return ERROR_TARGET_NOT_HALTED;
2060 if (breakpoint->set)
2062 LOG_WARNING("breakpoint already set");
2063 return ERROR_OK;
2066 if (breakpoint->type == BKPT_HARD)
2068 uint32_t value = breakpoint->address | 1;
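/* IBCRx takes the breakpoint address with bit 0 as the enable flag; xscale_unset_breakpoint() clears it again by writing 0 */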
2069 if (!xscale->ibcr0_used)
2071 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2072 xscale->ibcr0_used = 1;
2073 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2075 else if (!xscale->ibcr1_used)
2077 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2078 xscale->ibcr1_used = 1;
2079 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2081 else
2083 LOG_ERROR("BUG: no hardware comparator available");
2084 return ERROR_OK;
2087 else if (breakpoint->type == BKPT_SOFT)
2089 if (breakpoint->length == 4)
2091 /* keep the original instruction in target endianness */
2092 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2094 return retval;
2096 /* write the ARM breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
2097 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2099 return retval;
2102 else
2104 /* keep the original instruction in target endianness */
2105 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2107 return retval;
2109 /* write the Thumb breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
2110 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2112 return retval;
2115 breakpoint->set = 1;
2118 return ERROR_OK;
2121 static int xscale_add_breakpoint(struct target *target,
2122 struct breakpoint *breakpoint)
2124 struct xscale_common *xscale = target_to_xscale(target);
2126 if (target->state != TARGET_HALTED)
2128 LOG_WARNING("target not halted");
2129 return ERROR_TARGET_NOT_HALTED;
2132 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2134 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2135 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2138 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2140 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2141 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2144 if (breakpoint->type == BKPT_HARD)
2146 xscale->ibcr_available--;
2149 return ERROR_OK;
2152 static int xscale_unset_breakpoint(struct target *target,
2153 struct breakpoint *breakpoint)
2155 int retval;
2156 struct xscale_common *xscale = target_to_xscale(target);
2158 if (target->state != TARGET_HALTED)
2160 LOG_WARNING("target not halted");
2161 return ERROR_TARGET_NOT_HALTED;
2164 if (!breakpoint->set)
2166 LOG_WARNING("breakpoint not set");
2167 return ERROR_OK;
2170 if (breakpoint->type == BKPT_HARD)
2172 if (breakpoint->set == 1)
2174 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2175 xscale->ibcr0_used = 0;
2177 else if (breakpoint->set == 2)
2179 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2180 xscale->ibcr1_used = 0;
2182 breakpoint->set = 0;
2184 else
2186 /* restore original instruction (kept in target endianness) */
2187 if (breakpoint->length == 4)
2189 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2191 return retval;
2194 else
2196 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2198 return retval;
2201 breakpoint->set = 0;
2204 return ERROR_OK;
2207 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2209 struct xscale_common *xscale = target_to_xscale(target);
2211 if (target->state != TARGET_HALTED)
2213 LOG_WARNING("target not halted");
2214 return ERROR_TARGET_NOT_HALTED;
2217 if (breakpoint->set)
2219 xscale_unset_breakpoint(target, breakpoint);
2222 if (breakpoint->type == BKPT_HARD)
2223 xscale->ibcr_available++;
2225 return ERROR_OK;
2228 static int xscale_set_watchpoint(struct target *target,
2229 struct watchpoint *watchpoint)
2231 struct xscale_common *xscale = target_to_xscale(target);
2232 uint8_t enable = 0;
2233 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2234 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2236 if (target->state != TARGET_HALTED)
2238 LOG_WARNING("target not halted");
2239 return ERROR_TARGET_NOT_HALTED;
2242 xscale_get_reg(dbcon);
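/* DBCON carries a two-bit mode field per data breakpoint register: bits 1:0 for DBR0, bits 3:2 for DBR1 (0x1 = write, 0x2 = any access, 0x3 = read, as selected below) */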
2244 switch (watchpoint->rw)
2246 case WPT_READ:
2247 enable = 0x3;
2248 break;
2249 case WPT_ACCESS:
2250 enable = 0x2;
2251 break;
2252 case WPT_WRITE:
2253 enable = 0x1;
2254 break;
2255 default:
2256 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2259 if (!xscale->dbr0_used)
2261 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2262 dbcon_value |= enable;
2263 xscale_set_reg_u32(dbcon, dbcon_value);
2264 watchpoint->set = 1;
2265 xscale->dbr0_used = 1;
2267 else if (!xscale->dbr1_used)
2269 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2270 dbcon_value |= enable << 2;
2271 xscale_set_reg_u32(dbcon, dbcon_value);
2272 watchpoint->set = 2;
2273 xscale->dbr1_used = 1;
2275 else
2277 LOG_ERROR("BUG: no hardware comparator available");
2278 return ERROR_OK;
2281 return ERROR_OK;
2284 static int xscale_add_watchpoint(struct target *target,
2285 struct watchpoint *watchpoint)
2287 struct xscale_common *xscale = target_to_xscale(target);
2289 if (target->state != TARGET_HALTED)
2291 LOG_WARNING("target not halted");
2292 return ERROR_TARGET_NOT_HALTED;
2295 if (xscale->dbr_available < 1)
2297 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2300 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2305 xscale->dbr_available--;
2307 return ERROR_OK;
2310 static int xscale_unset_watchpoint(struct target *target,
2311 struct watchpoint *watchpoint)
2313 struct xscale_common *xscale = target_to_xscale(target);
2314 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2315 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2317 if (target->state != TARGET_HALTED)
2319 LOG_WARNING("target not halted");
2320 return ERROR_TARGET_NOT_HALTED;
2323 if (!watchpoint->set)
2325 LOG_WARNING("breakpoint not set");
2326 return ERROR_OK;
2329 if (watchpoint->set == 1)
2331 dbcon_value &= ~0x3;
2332 xscale_set_reg_u32(dbcon, dbcon_value);
2333 xscale->dbr0_used = 0;
2335 else if (watchpoint->set == 2)
2337 dbcon_value &= ~0xc;
2338 xscale_set_reg_u32(dbcon, dbcon_value);
2339 xscale->dbr1_used = 0;
2341 watchpoint->set = 0;
2343 return ERROR_OK;
2346 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2348 struct xscale_common *xscale = target_to_xscale(target);
2350 if (target->state != TARGET_HALTED)
2352 LOG_WARNING("target not halted");
2353 return ERROR_TARGET_NOT_HALTED;
2356 if (watchpoint->set)
2358 xscale_unset_watchpoint(target, watchpoint);
2361 xscale->dbr_available++;
2363 return ERROR_OK;
2366 static int xscale_get_reg(struct reg *reg)
2368 struct xscale_reg *arch_info = reg->arch_info;
2369 struct target *target = arch_info->target;
2370 struct xscale_common *xscale = target_to_xscale(target);
2372 /* DCSR, TX and RX are accessible via JTAG */
2373 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2375 return xscale_read_dcsr(arch_info->target);
2377 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2379 /* 1 = consume register content */
2380 return xscale_read_tx(arch_info->target, 1);
2382 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2384 /* can't read from RX register (host -> debug handler) */
2385 return ERROR_OK;
2387 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2389 /* can't (explicitly) read from TXRXCTRL register */
2390 return ERROR_OK;
2392 else /* Other DBG registers have to be transferred by the debug handler */
2394 /* send CP read request (command 0x40) */
2395 xscale_send_u32(target, 0x40);
2397 /* send CP register number */
2398 xscale_send_u32(target, arch_info->dbg_handler_number);
2400 /* read register value */
2401 xscale_read_tx(target, 1);
2402 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2404 reg->dirty = 0;
2405 reg->valid = 1;
2408 return ERROR_OK;
2411 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2413 struct xscale_reg *arch_info = reg->arch_info;
2414 struct target *target = arch_info->target;
2415 struct xscale_common *xscale = target_to_xscale(target);
2416 uint32_t value = buf_get_u32(buf, 0, 32);
2418 /* DCSR, TX and RX are accessible via JTAG */
2419 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2421 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2422 return xscale_write_dcsr(arch_info->target, -1, -1);
2424 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2426 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2427 return xscale_write_rx(arch_info->target);
2429 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2431 /* can't write to TX register (debug-handler -> host) */
2432 return ERROR_OK;
2434 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2436 /* can't (explicitly) write to TXRXCTRL register */
2437 return ERROR_OK;
2439 else /* Other DBG registers have to be transferred by the debug handler */
2441 /* send CP write request (command 0x41) */
2442 xscale_send_u32(target, 0x41);
2444 /* send CP register number */
2445 xscale_send_u32(target, arch_info->dbg_handler_number);
2447 /* send CP register value */
2448 xscale_send_u32(target, value);
2449 buf_set_u32(reg->value, 0, 32, value);
2452 return ERROR_OK;
2455 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2457 struct xscale_common *xscale = target_to_xscale(target);
2458 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2459 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
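/* unlike xscale_write_dcsr(), which uses the JTAG access to DCSR, this routes the write through the running debug handler's CP-write command */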
2461 /* send CP write request (command 0x41) */
2462 xscale_send_u32(target, 0x41);
2464 /* send CP register number */
2465 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2467 /* send CP register value */
2468 xscale_send_u32(target, value);
2469 buf_set_u32(dcsr->value, 0, 32, value);
2471 return ERROR_OK;
2474 static int xscale_read_trace(struct target *target)
2476 struct xscale_common *xscale = target_to_xscale(target);
2477 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2478 struct xscale_trace_data **trace_data_p;
2480 /* 258 words from debug handler
2481 * 256 trace buffer entries
2482 * 2 checkpoint addresses
2484 uint32_t trace_buffer[258];
2485 int is_address[256];
2486 int i, j;
2488 if (target->state != TARGET_HALTED)
2490 LOG_WARNING("target must be stopped to read trace data");
2491 return ERROR_TARGET_NOT_HALTED;
2494 /* send read trace buffer command (command 0x61) */
2495 xscale_send_u32(target, 0x61);
2497 /* receive trace buffer content */
2498 xscale_receive(target, trace_buffer, 258);
2500 /* parse buffer backwards to identify address entries */
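/* an indirect branch message (0x9x) or checkpointed indirect branch message (0xDx) is preceded by up to four bytes of target address, which must not be misread as messages themselves */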
2501 for (i = 255; i >= 0; i--)
2503 is_address[i] = 0;
2504 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2505 ((trace_buffer[i] & 0xf0) == 0xd0))
2507 if (i >= 3)
2508 is_address[--i] = 1;
2509 if (i >= 2)
2510 is_address[--i] = 1;
2511 if (i >= 1)
2512 is_address[--i] = 1;
2513 if (i >= 0)
2514 is_address[--i] = 1;
2519 /* search first non-zero entry */
2520 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2523 if (j == 256)
2525 LOG_DEBUG("no trace data collected");
2526 return ERROR_XSCALE_NO_TRACE_DATA;
2529 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2532 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2533 (*trace_data_p)->next = NULL;
2534 (*trace_data_p)->chkpt0 = trace_buffer[256];
2535 (*trace_data_p)->chkpt1 = trace_buffer[257];
2536 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2537 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2538 (*trace_data_p)->depth = 256 - j;
2540 for (i = j; i < 256; i++)
2542 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2543 if (is_address[i])
2544 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2545 else
2546 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2549 return ERROR_OK;
2552 static int xscale_read_instruction(struct target *target,
2553 struct arm_instruction *instruction)
2555 struct xscale_common *xscale = target_to_xscale(target);
2556 int i;
2557 int section = -1;
2558 uint32_t size_read;
2559 uint32_t opcode;
2560 int retval;
2562 if (!xscale->trace.image)
2563 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2565 /* search for the section the current instruction belongs to */
2566 for (i = 0; i < xscale->trace.image->num_sections; i++)
2568 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2569 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2571 section = i;
2572 break;
2576 if (section == -1)
2578 /* current instruction couldn't be found in the image */
2579 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2582 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2584 uint8_t buf[4];
2585 if ((retval = image_read_section(xscale->trace.image, section,
2586 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2587 4, buf, &size_read)) != ERROR_OK)
2589 LOG_ERROR("error while reading instruction: %i", retval);
2590 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2592 opcode = target_buffer_get_u32(target, buf);
2593 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2595 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2597 uint8_t buf[2];
2598 if ((retval = image_read_section(xscale->trace.image, section,
2599 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2600 2, buf, &size_read)) != ERROR_OK)
2602 LOG_ERROR("error while reading instruction: %i", retval);
2603 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2605 opcode = target_buffer_get_u16(target, buf);
2606 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2608 else
2610 LOG_ERROR("BUG: unknown core state encountered");
2611 exit(-1);
2614 return ERROR_OK;
2617 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2618 int i, uint32_t *target)
2620 /* if there are fewer than four entries prior to the indirect branch message
2621 * we can't extract the address */
2622 if (i < 4)
2624 return -1;
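/* the four entries preceding the branch message hold the 32-bit target address, least significant byte closest to the message */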
2627 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2628 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2630 return 0;
2633 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2635 struct xscale_common *xscale = target_to_xscale(target);
2636 int next_pc_ok = 0;
2637 uint32_t next_pc = 0x0;
2638 struct xscale_trace_data *trace_data = xscale->trace.data;
2639 int retval;
2641 while (trace_data)
2643 int i, chkpt;
2644 int rollover;
2645 int branch;
2646 int exception;
2647 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2649 chkpt = 0;
2650 rollover = 0;
2652 for (i = 0; i < trace_data->depth; i++)
2654 next_pc_ok = 0;
2655 branch = 0;
2656 exception = 0;
2658 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2659 continue;
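/* the upper nibble of a trace message is the message type; the lower nibble is an incremental instruction count, extended by 16 for every preceding roll-over message */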
2661 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2663 case 0: /* Exceptions */
2664 case 1:
2665 case 2:
2666 case 3:
2667 case 4:
2668 case 5:
2669 case 6:
2670 case 7:
2671 exception = (trace_data->entries[i].data & 0x70) >> 4;
2672 next_pc_ok = 1;
2673 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2674 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2675 break;
2676 case 8: /* Direct Branch */
2677 branch = 1;
2678 break;
2679 case 9: /* Indirect Branch */
2680 branch = 1;
2681 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2683 next_pc_ok = 1;
2685 break;
2686 case 13: /* Checkpointed Indirect Branch */
2687 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2689 next_pc_ok = 1;
2690 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2691 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2692 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2694 /* explicit fall-through */
2695 case 12: /* Checkpointed Direct Branch */
2696 branch = 1;
2697 if (chkpt == 0)
2699 next_pc_ok = 1;
2700 next_pc = trace_data->chkpt0;
2701 chkpt++;
2703 else if (chkpt == 1)
2705 next_pc_ok = 1;
2706 next_pc = trace_data->chkpt1;
2707 chkpt++;
2709 else
2711 LOG_WARNING("more than two checkpointed branches encountered");
2713 break;
2714 case 15: /* Roll-over */
2715 rollover++;
2716 continue;
2717 default: /* Reserved */
2718 command_print(cmd_ctx, "--- reserved trace message ---");
2719 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2720 return ERROR_OK;
2723 if (xscale->trace.pc_ok)
2725 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2726 struct arm_instruction instruction;
2728 if ((exception == 6) || (exception == 7))
2730 /* IRQ or FIQ exception, no instruction executed */
2731 executed -= 1;
2734 while (executed-- >= 0)
2736 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2738 /* can't continue tracing with no image available */
2739 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2741 return retval;
2743 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2745 /* TODO: handle incomplete images */
2749 /* a precise abort on a load to the PC is included in the incremental
2750 * word count, other instructions causing data aborts are not included
2752 if ((executed == 0) && (exception == 4)
2753 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2755 if ((instruction.type == ARM_LDM)
2756 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2758 executed--;
2760 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2761 && (instruction.info.load_store.Rd != 15))
2763 executed--;
2767 /* only the last instruction executed
2768 * (the one that caused the control flow change)
2769 * could be a taken branch
2771 if (((executed == -1) && (branch == 1)) &&
2772 (((instruction.type == ARM_B) ||
2773 (instruction.type == ARM_BL) ||
2774 (instruction.type == ARM_BLX)) &&
2775 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2777 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2779 else
2781 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2783 command_print(cmd_ctx, "%s", instruction.text);
2786 rollover = 0;
2789 if (next_pc_ok)
2791 xscale->trace.current_pc = next_pc;
2792 xscale->trace.pc_ok = 1;
2796 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2798 struct arm_instruction instruction;
2799 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2801 /* can't continue tracing with no image available */
2802 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2804 return retval;
2806 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2808 /* TODO: handle incomplete images */
2811 command_print(cmd_ctx, "%s", instruction.text);
2814 trace_data = trace_data->next;
2817 return ERROR_OK;
2820 static void xscale_build_reg_cache(struct target *target)
2822 struct xscale_common *xscale = target_to_xscale(target);
2823 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2824 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2825 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2826 int i;
2827 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2829 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2830 armv4_5->core_cache = (*cache_p);
2832 /* register a register arch-type for XScale dbg registers only once */
2833 if (xscale_reg_arch_type == -1)
2834 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2836 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2837 cache_p = &(*cache_p)->next;
2839 /* fill in values for the xscale reg cache */
2840 (*cache_p)->name = "XScale registers";
2841 (*cache_p)->next = NULL;
2842 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2843 (*cache_p)->num_regs = num_regs;
2845 for (i = 0; i < num_regs; i++)
2847 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2848 (*cache_p)->reg_list[i].value = calloc(4, 1);
2849 (*cache_p)->reg_list[i].dirty = 0;
2850 (*cache_p)->reg_list[i].valid = 0;
2851 (*cache_p)->reg_list[i].size = 32;
2852 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2853 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2854 arch_info[i] = xscale_reg_arch_info[i];
2855 arch_info[i].target = target;
2858 xscale->reg_cache = (*cache_p);
2861 static int xscale_init_target(struct command_context *cmd_ctx,
2862 struct target *target)
2864 xscale_build_reg_cache(target);
2865 return ERROR_OK;
2868 static int xscale_init_arch_info(struct target *target,
2869 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2871 struct arm *armv4_5;
2872 uint32_t high_reset_branch, low_reset_branch;
2873 int i;
2875 armv4_5 = &xscale->armv4_5_common;
2877 /* store architecture specific data (none so far) */
2878 xscale->common_magic = XSCALE_COMMON_MAGIC;
2880 /* we don't really *need* variant info ... */
2881 if (variant) {
2882 int ir_length = 0;
2884 if (strcmp(variant, "pxa250") == 0
2885 || strcmp(variant, "pxa255") == 0
2886 || strcmp(variant, "pxa26x") == 0)
2887 ir_length = 5;
2888 else if (strcmp(variant, "pxa27x") == 0
2889 || strcmp(variant, "ixp42x") == 0
2890 || strcmp(variant, "ixp45x") == 0
2891 || strcmp(variant, "ixp46x") == 0)
2892 ir_length = 7;
2893 else
2894 LOG_WARNING("%s: unrecognized variant %s",
2895 tap->dotted_name, variant);
2897 if (ir_length && ir_length != tap->ir_length) {
2898 LOG_WARNING("%s: IR length for %s is %d; fixing",
2899 tap->dotted_name, variant, ir_length);
2900 tap->ir_length = ir_length;
2904 /* the debug handler isn't installed (and thus not running) at this time */
2905 xscale->handler_address = 0xfe000800;
2907 /* clear the vectors we keep locally for reference */
2908 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2909 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2911 /* no user-specified vectors have been configured yet */
2912 xscale->static_low_vectors_set = 0x0;
2913 xscale->static_high_vectors_set = 0x0;
2915 /* calculate branches to debug handler */
2916 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2917 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
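/* ARM B offsets are relative to PC + 8 and counted in words, hence the -0x8 and >> 2; both reset vectors branch to handler_address + 0x20 */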
2919 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2920 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
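/* offset 0xfffffe is -2 words, i.e. a branch-to-self, used as a placeholder for the remaining exception vectors */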
2922 for (i = 1; i <= 7; i++)
2924 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2925 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2928 /* 64kB aligned region used for DCache cleaning */
2929 xscale->cache_clean_address = 0xfffe0000;
2931 xscale->hold_rst = 0;
2932 xscale->external_debug_break = 0;
2934 xscale->ibcr_available = 2;
2935 xscale->ibcr0_used = 0;
2936 xscale->ibcr1_used = 0;
2938 xscale->dbr_available = 2;
2939 xscale->dbr0_used = 0;
2940 xscale->dbr1_used = 0;
2942 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2943 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2945 xscale->vector_catch = 0x1;
2947 xscale->trace.capture_status = TRACE_IDLE;
2948 xscale->trace.data = NULL;
2949 xscale->trace.image = NULL;
2950 xscale->trace.buffer_enabled = 0;
2951 xscale->trace.buffer_fill = 0;
2953 /* prepare ARMv4/5 specific information */
2954 armv4_5->arch_info = xscale;
2955 armv4_5->read_core_reg = xscale_read_core_reg;
2956 armv4_5->write_core_reg = xscale_write_core_reg;
2957 armv4_5->full_context = xscale_full_context;
2959 armv4_5_init_arch_info(target, armv4_5);
2961 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2962 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2963 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2964 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2965 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2966 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2967 xscale->armv4_5_mmu.has_tiny_pages = 1;
2968 xscale->armv4_5_mmu.mmu_enabled = 0;
2970 return ERROR_OK;
2973 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2975 struct xscale_common *xscale;
2977 if (sizeof xscale_debug_handler - 1 > 0x800) {
2978 LOG_ERROR("debug_handler.bin: larger than 2kb");
2979 return ERROR_FAIL;
2982 xscale = calloc(1, sizeof(*xscale));
2983 if (!xscale)
2984 return ERROR_FAIL;
2986 return xscale_init_arch_info(target, xscale, target->tap,
2987 target->variant);
2990 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2992 struct target *target = NULL;
2993 struct xscale_common *xscale;
2994 int retval;
2995 uint32_t handler_address;
2997 if (argc < 2)
2999 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3000 return ERROR_COMMAND_SYNTAX_ERROR;
3003 if ((target = get_target(args[0])) == NULL)
3005 LOG_ERROR("target '%s' not defined", args[0]);
3006 return ERROR_FAIL;
3009 xscale = target_to_xscale(target);
3010 retval = xscale_verify_pointer(cmd_ctx, xscale);
3011 if (retval != ERROR_OK)
3012 return retval;
3014 COMMAND_PARSE_NUMBER(u32, args[1], handler_address);
3016 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3017 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3019 xscale->handler_address = handler_address;
3021 else
3023 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3024 return ERROR_FAIL;
3027 return ERROR_OK;
3030 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3032 struct target *target = NULL;
3033 struct xscale_common *xscale;
3034 int retval;
3035 uint32_t cache_clean_address;
3037 if (argc < 2)
3039 return ERROR_COMMAND_SYNTAX_ERROR;
3042 target = get_target(args[0]);
3043 if (target == NULL)
3045 LOG_ERROR("target '%s' not defined", args[0]);
3046 return ERROR_FAIL;
3048 xscale = target_to_xscale(target);
3049 retval = xscale_verify_pointer(cmd_ctx, xscale);
3050 if (retval != ERROR_OK)
3051 return retval;
3053 COMMAND_PARSE_NUMBER(u32, args[1], cache_clean_address);
3055 if (cache_clean_address & 0xffff)
3057 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3059 else
3061 xscale->cache_clean_address = cache_clean_address;
3064 return ERROR_OK;
3067 COMMAND_HANDLER(xscale_handle_cache_info_command)
3069 struct target *target = get_current_target(cmd_ctx);
3070 struct xscale_common *xscale = target_to_xscale(target);
3071 int retval;
3073 retval = xscale_verify_pointer(cmd_ctx, xscale);
3074 if (retval != ERROR_OK)
3075 return retval;
3077 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3080 static int xscale_virt2phys(struct target *target,
3081 uint32_t virtual, uint32_t *physical)
3083 struct xscale_common *xscale = target_to_xscale(target);
3084 int type;
3085 uint32_t cb;
3086 int domain;
3087 uint32_t ap;
3089 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3090 LOG_ERROR(xscale_not);
3091 return ERROR_TARGET_INVALID;
3094 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3095 if (type == -1)
3097 return ret;
3099 *physical = ret;
3100 return ERROR_OK;
3103 static int xscale_mmu(struct target *target, int *enabled)
3105 struct xscale_common *xscale = target_to_xscale(target);
3107 if (target->state != TARGET_HALTED)
3109 LOG_ERROR("Target not halted");
3110 return ERROR_TARGET_INVALID;
3112 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3113 return ERROR_OK;
3116 COMMAND_HANDLER(xscale_handle_mmu_command)
3118 struct target *target = get_current_target(cmd_ctx);
3119 struct xscale_common *xscale = target_to_xscale(target);
3120 int retval;
3122 retval = xscale_verify_pointer(cmd_ctx, xscale);
3123 if (retval != ERROR_OK)
3124 return retval;
3126 if (target->state != TARGET_HALTED)
3128 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3129 return ERROR_OK;
3132 if (argc >= 1)
3134 if (strcmp("enable", args[0]) == 0)
3136 xscale_enable_mmu_caches(target, 1, 0, 0);
3137 xscale->armv4_5_mmu.mmu_enabled = 1;
3139 else if (strcmp("disable", args[0]) == 0)
3141 xscale_disable_mmu_caches(target, 1, 0, 0);
3142 xscale->armv4_5_mmu.mmu_enabled = 0;
3146 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3148 return ERROR_OK;
3151 COMMAND_HANDLER(xscale_handle_idcache_command)
3153 struct target *target = get_current_target(cmd_ctx);
3154 struct xscale_common *xscale = target_to_xscale(target);
3155 int icache = 0, dcache = 0;
3156 int retval;
3158 retval = xscale_verify_pointer(cmd_ctx, xscale);
3159 if (retval != ERROR_OK)
3160 return retval;
3162 if (target->state != TARGET_HALTED)
3164 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3165 return ERROR_OK;
3168 if (strcmp(CMD_NAME, "icache") == 0)
3169 icache = 1;
3170 else if (strcmp(CMD_NAME, "dcache") == 0)
3171 dcache = 1;
3173 if (argc >= 1)
3175 if (strcmp("enable", args[0]) == 0)
3177 xscale_enable_mmu_caches(target, 0, dcache, icache);
3179 if (icache)
3180 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3181 else if (dcache)
3182 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3184 else if (strcmp("disable", args[0]) == 0)
3186 xscale_disable_mmu_caches(target, 0, dcache, icache);
3188 if (icache)
3189 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3190 else if (dcache)
3191 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3195 if (icache)
3196 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3198 if (dcache)
3199 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3201 return ERROR_OK;
3204 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3206 struct target *target = get_current_target(cmd_ctx);
3207 struct xscale_common *xscale = target_to_xscale(target);
3208 int retval;
3210 retval = xscale_verify_pointer(cmd_ctx, xscale);
3211 if (retval != ERROR_OK)
3212 return retval;
3214 if (argc < 1)
3216 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3218 else
3220 COMMAND_PARSE_NUMBER(u8, args[0], xscale->vector_catch);
3221 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3222 xscale_write_dcsr(target, -1, -1);
3225 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3227 return ERROR_OK;
3231 COMMAND_HANDLER(xscale_handle_vector_table_command)
3233 struct target *target = get_current_target(cmd_ctx);
3234 struct xscale_common *xscale = target_to_xscale(target);
3235 int err = 0;
3236 int retval;
3238 retval = xscale_verify_pointer(cmd_ctx, xscale);
3239 if (retval != ERROR_OK)
3240 return retval;
3242 if (argc == 0) /* print current settings */
3244 int idx;
3246 command_print(cmd_ctx, "active user-set static vectors:");
3247 for (idx = 1; idx < 8; idx++)
3248 if (xscale->static_low_vectors_set & (1 << idx))
3249 command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3250 for (idx = 1; idx < 8; idx++)
3251 if (xscale->static_high_vectors_set & (1 << idx))
3252 command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3253 return ERROR_OK;
3256 if (argc != 3)
3257 err = 1;
3258 else
3260 int idx;
3261 COMMAND_PARSE_NUMBER(int, args[1], idx);
3262 uint32_t vec;
3263 COMMAND_PARSE_NUMBER(u32, args[2], vec);
3265 if (idx < 1 || idx >= 8)
3266 err = 1;
3268 if (!err && strcmp(args[0], "low") == 0)
3270 xscale->static_low_vectors_set |= (1<<idx);
3271 xscale->static_low_vectors[idx] = vec;
3273 else if (!err && (strcmp(args[0], "high") == 0))
3275 xscale->static_high_vectors_set |= (1<<idx);
3276 xscale->static_high_vectors[idx] = vec;
3278 else
3279 err = 1;
3282 if (err)
3283 command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
3285 return ERROR_OK;
3289 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3291 struct target *target = get_current_target(cmd_ctx);
3292 struct xscale_common *xscale = target_to_xscale(target);
3293 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3294 uint32_t dcsr_value;
3295 int retval;
3297 retval = xscale_verify_pointer(cmd_ctx, xscale);
3298 if (retval != ERROR_OK)
3299 return retval;
3301 if (target->state != TARGET_HALTED)
3303 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3304 return ERROR_OK;
3307 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3309 struct xscale_trace_data *td, *next_td;
3310 xscale->trace.buffer_enabled = 1;
3312 /* free old trace data */
3313 td = xscale->trace.data;
3314 while (td)
3316 next_td = td->next;
3318 if (td->entries)
3319 free(td->entries);
3320 free(td);
3321 td = next_td;
3323 xscale->trace.data = NULL;
3325 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3327 xscale->trace.buffer_enabled = 0;
3330 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3332 uint32_t fill = 1;
3333 if (argc >= 3)
3334 COMMAND_PARSE_NUMBER(u32, args[2], fill);
3335 xscale->trace.buffer_fill = fill;
3337 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3339 xscale->trace.buffer_fill = -1;
3342 if (xscale->trace.buffer_enabled)
3344 /* if we enable the trace buffer in fill-once
3345 * mode we know the address of the first instruction */
3346 xscale->trace.pc_ok = 1;
3347 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3349 else
3351 /* otherwise the address is unknown, and we have no known good PC */
3352 xscale->trace.pc_ok = 0;
3355 command_print(cmd_ctx, "trace buffer %s (%s)",
3356 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3357 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
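/* update the trace mode bits in DCSR: fill-once mode sets bit 1, wrap-around leaves bits 1:0 clear */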
3359 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3360 if (xscale->trace.buffer_fill >= 0)
3361 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3362 else
3363 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3365 return ERROR_OK;
3368 COMMAND_HANDLER(xscale_handle_trace_image_command)
3370 struct target *target = get_current_target(cmd_ctx);
3371 struct xscale_common *xscale = target_to_xscale(target);
3372 int retval;
3374 if (argc < 1)
3376 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3377 return ERROR_OK;
3380 retval = xscale_verify_pointer(cmd_ctx, xscale);
3381 if (retval != ERROR_OK)
3382 return retval;
3384 if (xscale->trace.image)
3386 image_close(xscale->trace.image);
3387 free(xscale->trace.image);
3388 command_print(cmd_ctx, "previously loaded image found and closed");
3391 xscale->trace.image = malloc(sizeof(struct image));
3392 xscale->trace.image->base_address_set = 0;
3393 xscale->trace.image->start_address_set = 0;
3395 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3396 if (argc >= 2)
3398 xscale->trace.image->base_address_set = 1;
3399 COMMAND_PARSE_NUMBER(int, args[1], xscale->trace.image->base_address);
3401 else
3403 xscale->trace.image->base_address_set = 0;
3406 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3408 free(xscale->trace.image);
3409 xscale->trace.image = NULL;
3410 return ERROR_OK;
3413 return ERROR_OK;
3416 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3418 struct target *target = get_current_target(cmd_ctx);
3419 struct xscale_common *xscale = target_to_xscale(target);
3420 struct xscale_trace_data *trace_data;
3421 struct fileio file;
3422 int retval;
3424 retval = xscale_verify_pointer(cmd_ctx, xscale);
3425 if (retval != ERROR_OK)
3426 return retval;
3428 if (target->state != TARGET_HALTED)
3430 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3431 return ERROR_OK;
3434 if (argc < 1)
3436 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3437 return ERROR_OK;
3440 trace_data = xscale->trace.data;
3442 if (!trace_data)
3444 command_print(cmd_ctx, "no trace data collected");
3445 return ERROR_OK;
3448 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3450 return ERROR_OK;
3453 while (trace_data)
3455 int i;
3457 fileio_write_u32(&file, trace_data->chkpt0);
3458 fileio_write_u32(&file, trace_data->chkpt1);
3459 fileio_write_u32(&file, trace_data->last_instruction);
3460 fileio_write_u32(&file, trace_data->depth);
3462 for (i = 0; i < trace_data->depth; i++)
3463 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3465 trace_data = trace_data->next;
3468 fileio_close(&file);
3470 return ERROR_OK;
3473 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3475 struct target *target = get_current_target(cmd_ctx);
3476 struct xscale_common *xscale = target_to_xscale(target);
3477 int retval;
3479 retval = xscale_verify_pointer(cmd_ctx, xscale);
3480 if (retval != ERROR_OK)
3481 return retval;
3483 xscale_analyze_trace(target, cmd_ctx);
3485 return ERROR_OK;
3488 COMMAND_HANDLER(xscale_handle_cp15)
3490 struct target *target = get_current_target(cmd_ctx);
3491 struct xscale_common *xscale = target_to_xscale(target);
3492 int retval;
3494 retval = xscale_verify_pointer(cmd_ctx, xscale);
3495 if (retval != ERROR_OK)
3496 return retval;
3498 if (target->state != TARGET_HALTED)
3500 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3501 return ERROR_OK;
3503 uint32_t reg_no = 0;
3504 struct reg *reg = NULL;
3505 if (argc > 0)
3507 COMMAND_PARSE_NUMBER(u32, args[0], reg_no);
3508 /* translate from XScale cp15 register number to OpenOCD register */
3509 switch (reg_no)
3511 case 0:
3512 reg_no = XSCALE_MAINID;
3513 break;
3514 case 1:
3515 reg_no = XSCALE_CTRL;
3516 break;
3517 case 2:
3518 reg_no = XSCALE_TTB;
3519 break;
3520 case 3:
3521 reg_no = XSCALE_DAC;
3522 break;
3523 case 5:
3524 reg_no = XSCALE_FSR;
3525 break;
3526 case 6:
3527 reg_no = XSCALE_FAR;
3528 break;
3529 case 13:
3530 reg_no = XSCALE_PID;
3531 break;
3532 case 15:
3533 reg_no = XSCALE_CPACCESS;
3534 break;
3535 default:
3536 command_print(cmd_ctx, "invalid register number");
3537 return ERROR_INVALID_ARGUMENTS;
3539 reg = &xscale->reg_cache->reg_list[reg_no];
3542 if (argc == 1)
3544 uint32_t value;
3546 /* read the selected cp15 register */
3547 xscale_get_reg(reg);
3548 value = buf_get_u32(reg->value, 0, 32);
3549 command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3551 else if (argc == 2)
3553 uint32_t value;
3554 COMMAND_PARSE_NUMBER(u32, args[1], value);
3556 /* send CP write request (command 0x41) */
3557 xscale_send_u32(target, 0x41);
3559 /* send CP register number */
3560 xscale_send_u32(target, reg_no);
3562 /* send CP register value */
3563 xscale_send_u32(target, value);
3565 /* execute cpwait to ensure outstanding operations complete */
3566 xscale_send_u32(target, 0x53);
3568 else
3570 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3573 return ERROR_OK;
3576 static int xscale_register_commands(struct command_context *cmd_ctx)
3578 struct command *xscale_cmd;
3580 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3582 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>': set the address used for the debug handler");
3583 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3585 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3586 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3587 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3588 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3590 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3591 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3593 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3595 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3596 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3597 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3598 COMMAND_EXEC, "load image from <file> [base address]");
3600 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3602 armv4_5_register_commands(cmd_ctx);
3604 return ERROR_OK;
3607 struct target_type xscale_target =
3609 .name = "xscale",
3611 .poll = xscale_poll,
3612 .arch_state = xscale_arch_state,
3614 .target_request_data = NULL,
3616 .halt = xscale_halt,
3617 .resume = xscale_resume,
3618 .step = xscale_step,
3620 .assert_reset = xscale_assert_reset,
3621 .deassert_reset = xscale_deassert_reset,
3622 .soft_reset_halt = NULL,
3624 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3626 .read_memory = xscale_read_memory,
3627 .write_memory = xscale_write_memory,
3628 .bulk_write_memory = xscale_bulk_write_memory,
3630 .checksum_memory = arm_checksum_memory,
3631 .blank_check_memory = arm_blank_check_memory,
3633 .run_algorithm = armv4_5_run_algorithm,
3635 .add_breakpoint = xscale_add_breakpoint,
3636 .remove_breakpoint = xscale_remove_breakpoint,
3637 .add_watchpoint = xscale_add_watchpoint,
3638 .remove_watchpoint = xscale_remove_watchpoint,
3640 .register_commands = xscale_register_commands,
3641 .target_create = xscale_target_create,
3642 .init_target = xscale_init_target,
3644 .virt2phys = xscale_virt2phys,
3645 .mmu = xscale_mmu