src/target/xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
42 * Important XScale documents available as of October 2009 include:
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
57 * Chip-specific microarchitecture documents may also be useful.
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
85 static char *const xscale_reg_list[] =
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
111 static const struct xscale_reg xscale_reg_arch_info[] =
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 uint8_t buf[4];
142 buf_set_u32(buf, 0, 32, value);
144 return xscale_set_reg(reg, buf);
147 static const char xscale_not[] = "target is not an XScale";
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
156 return ERROR_OK;
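/* Queue an IR scan selecting the given JTAG instruction on this TAP,
 * unless that instruction is already loaded (the scan is then skipped).
 */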
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
161 if (tap == NULL)
162 return ERROR_FAIL;
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 struct scan_field field;
167 uint8_t scratch[4];
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 return ERROR_OK;
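/* Read DCSR through the SELDCSR JTAG data register into the register cache,
 * then write the same value straight back; both scans carry the cached
 * hold_rst / external_debug_break bits in the 3-bit control field.
 */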
181 static int xscale_read_dcsr(struct target *target)
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199 memset(&fields, 0, sizeof fields);
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
231 /* write the register with the value we just read
232 * on this second pass, only the first bit of field0 is guaranteed to be 0)
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
238 jtag_set_end_state(TAP_IDLE);
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
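/* JTAG completion callback: convert a 32-bit scan result from LSB-first
 * bit order into a host-endian uint32_t, in place.
 */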
248 static void xscale_getbuf(jtag_callback_data_t arg)
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
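/* Collect 'num_words' 32-bit words sent by the debug handler over DBGTX.
 * Words whose TX-ready status bit was not set are retried; after 1000
 * polling rounds without progress the transfer times out.
 */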
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
276 memset(&fields, 0, sizeof fields);
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
303 fields[0].in_value = &field0[i];
305 jtag_add_pathmove(3, path);
307 fields[1].in_value = (uint8_t *)(field1 + i);
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
313 words_scheduled++;
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
325 if (!(field0[i] & 1))
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
334 words_scheduled--;
337 if (words_scheduled == 0)
339 if (attempts++==1000)
341 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
347 words_done += words_scheduled;
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
353 free(field1);
355 return retval;
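/* Read the debug handler's TX register. With 'consume' set, the handshake
 * passes through Shift-DR and clears TX_READY; otherwise the Pause-DR detour
 * leaves the register content untouched. Gives up after about one second.
 */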
358 static int xscale_read_tx(struct target *target, int consume)
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
372 jtag_set_end_state(TAP_IDLE);
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
387 memset(&fields, 0, sizeof fields);
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
405 for (;;)
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
415 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
435 if (!((!(field0_in & 1)) && consume))
437 goto done;
439 if (debug_level >= 3)
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
445 keep_alive();
448 done:
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
453 return ERROR_OK;
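/* Hand the cached RX value to the debug handler: wait (up to ~1s) until the
 * handler has drained the previous word, scan in the new value, then set the
 * rx_valid flag so the handler picks it up.
 */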
456 static int xscale_write_rx(struct target *target)
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
470 jtag_set_end_state(TAP_IDLE);
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
474 memset(&fields, 0, sizeof fields);
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
523 keep_alive();
526 done:
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
538 return ERROR_OK;
541 /* send 'count' elements of 'size' bytes each to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
549 jtag_set_end_state(TAP_IDLE);
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
561 switch (size)
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
566 t[1]=le_to_h_u32(buffer);
567 } else
569 t[1]=be_to_h_u32(buffer);
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
575 t[1]=le_to_h_u16(buffer);
576 } else
578 t[1]=be_to_h_u16(buffer);
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
588 jtag_add_dr_out(target->tap,
590 bits,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
602 return ERROR_OK;
605 static int xscale_send_u32(struct target *target, uint32_t value)
607 struct xscale_common *xscale = target_to_xscale(target);
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
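/* Write the cached DCSR value to the target; hold_rst and ext_dbg_brk
 * update the cached settings unless they are passed as -1.
 */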
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
637 memset(&fields, 0, sizeof fields);
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
669 return ERROR_OK;
672 /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
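/* e.g. parity(0x00000003) == 0, parity(0x00000007) == 1 */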
673 static unsigned int parity (unsigned int v)
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
701 buf_set_u32(&cmd, 0, 6, 0x3);
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
706 memset(&fields, 0, sizeof fields);
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
725 for (word = 0; word < 8; word++)
727 buf_set_u32(packet, 0, 32, buffer[word]);
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
736 return jtag_execute_queue();
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
754 memset(&fields, 0, sizeof fields);
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
766 return ERROR_OK;
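/* Refresh the copies of the low (0x0) and high (0xffff0000) exception
 * vectors, preferring any static vectors configured by the user, point the
 * reset vector at the debug handler entry, and reload both cache lines into
 * the mini-ICache.
 */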
769 static int xscale_update_vectors(struct target *target)
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
775 uint32_t low_reset_branch, high_reset_branch;
777 for (i = 1; i < 8; i++)
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
784 else
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
797 for (i = 1; i < 8; i++)
799 if (xscale->static_low_vectors_set & (1 << i))
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
803 else
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
830 return ERROR_OK;
833 static int xscale_arch_state(struct target *target)
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
838 static const char *state[] =
840 "disabled", "enabled"
843 static const char *arch_dbg_reason[] =
845 "", "\n(processor reset)", "\n(trace buffer full)"
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
868 return ERROR_OK;
871 static int xscale_poll(struct target *target)
873 int retval = ERROR_OK;
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
908 return retval;
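/* Called once the debug handler has signalled entry through TX: fetch the
 * register dump (r0, pc, r1-r7, cpsr, then the banked registers), decode the
 * DCSR method-of-entry field, apply the PC fixup and, if tracing is active,
 * read out the trace buffer.
 */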
911 static int xscale_debug_entry(struct target *target)
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
951 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
952 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
953 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
954 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
956 armv4_5->core_mode = buffer[9] & 0x1f;
957 if (!is_arm_mode(armv4_5->core_mode))
959 target->state = TARGET_UNKNOWN;
960 LOG_ERROR("cpsr contains invalid mode value - communication failure");
961 return ERROR_TARGET_FAILURE;
963 LOG_DEBUG("target entered debug state in %s mode",
964 arm_mode_name(armv4_5->core_mode));
966 if (buffer[9] & 0x20)
967 armv4_5->core_state = ARMV4_5_STATE_THUMB;
968 else
969 armv4_5->core_state = ARMV4_5_STATE_ARM;
972 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
973 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
975 xscale_receive(target, buffer, 8);
976 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
977 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
978 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
980 else
982 /* r8 to r14, but no spsr */
983 xscale_receive(target, buffer, 7);
986 /* move data from buffer to register cache */
987 for (i = 8; i <= 14; i++)
989 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
990 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
991 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
994 /* examine debug reason */
995 xscale_read_dcsr(target);
996 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
998 /* stored PC (for calculating fixup) */
999 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1001 switch (moe)
1003 case 0x0: /* Processor reset */
1004 target->debug_reason = DBG_REASON_DBGRQ;
1005 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1006 pc -= 4;
1007 break;
1008 case 0x1: /* Instruction breakpoint hit */
1009 target->debug_reason = DBG_REASON_BREAKPOINT;
1010 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1011 pc -= 4;
1012 break;
1013 case 0x2: /* Data breakpoint hit */
1014 target->debug_reason = DBG_REASON_WATCHPOINT;
1015 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1016 pc -= 4;
1017 break;
1018 case 0x3: /* BKPT instruction executed */
1019 target->debug_reason = DBG_REASON_BREAKPOINT;
1020 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1021 pc -= 4;
1022 break;
1023 case 0x4: /* Ext. debug event */
1024 target->debug_reason = DBG_REASON_DBGRQ;
1025 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1026 pc -= 4;
1027 break;
1028 case 0x5: /* Vector trap occurred */
1029 target->debug_reason = DBG_REASON_BREAKPOINT;
1030 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1031 pc -= 4;
1032 break;
1033 case 0x6: /* Trace buffer full break */
1034 target->debug_reason = DBG_REASON_DBGRQ;
1035 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1036 pc -= 4;
1037 break;
1038 case 0x7: /* Reserved (may flag Hot-Debug support) */
1039 default:
1040 LOG_ERROR("Method of Entry is 'Reserved'");
1041 exit(-1);
1042 break;
1045 /* apply PC fixup */
1046 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1048 /* on the first debug entry, identify cache type */
1049 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1051 uint32_t cache_type_reg;
1053 /* read cp15 cache type register */
1054 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1055 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1057 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1060 /* examine MMU and Cache settings */
1061 /* read cp15 control register */
1062 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1063 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1064 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1065 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1066 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1068 /* tracing enabled, read collected trace data */
1069 if (xscale->trace.buffer_enabled)
1071 xscale_read_trace(target);
1072 xscale->trace.buffer_fill--;
1074 /* resume if we're still collecting trace data */
1075 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1076 && (xscale->trace.buffer_fill > 0))
1078 xscale_resume(target, 1, 0x0, 1, 0);
1080 else
1082 xscale->trace.buffer_enabled = 0;
1086 return ERROR_OK;
1089 static int xscale_halt(struct target *target)
1091 struct xscale_common *xscale = target_to_xscale(target);
1093 LOG_DEBUG("target->state: %s",
1094 target_state_name(target));
1096 if (target->state == TARGET_HALTED)
1098 LOG_DEBUG("target was already halted");
1099 return ERROR_OK;
1101 else if (target->state == TARGET_UNKNOWN)
1103 /* this must not happen for an XScale target */
1104 LOG_ERROR("target was in unknown state when halt was requested");
1105 return ERROR_TARGET_INVALID;
1107 else if (target->state == TARGET_RESET)
1109 LOG_DEBUG("target->state == TARGET_RESET");
1111 else
1113 /* assert external dbg break */
1114 xscale->external_debug_break = 1;
1115 xscale_read_dcsr(target);
1117 target->debug_reason = DBG_REASON_DBGRQ;
1120 return ERROR_OK;
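/* Implement single-step by programming IBCR0 as a hardware breakpoint on
 * next_pc; a user breakpoint currently occupying IBCR0 is unset first.
 */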
1123 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1127 int retval;
1129 if (xscale->ibcr0_used)
1131 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1133 if (ibcr0_bp)
1135 xscale_unset_breakpoint(target, ibcr0_bp);
1137 else
1139 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1140 exit(-1);
1144 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1145 return retval;
1147 return ERROR_OK;
1150 static int xscale_disable_single_step(struct target *target)
1152 struct xscale_common *xscale = target_to_xscale(target);
1153 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1154 int retval;
1156 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1157 return retval;
1159 return ERROR_OK;
1162 static void xscale_enable_watchpoints(struct target *target)
1164 struct watchpoint *watchpoint = target->watchpoints;
1166 while (watchpoint)
1168 if (watchpoint->set == 0)
1169 xscale_set_watchpoint(target, watchpoint);
1170 watchpoint = watchpoint->next;
1174 static void xscale_enable_breakpoints(struct target *target)
1176 struct breakpoint *breakpoint = target->breakpoints;
1178 /* set any pending breakpoints */
1179 while (breakpoint)
1181 if (breakpoint->set == 0)
1182 xscale_set_breakpoint(target, breakpoint);
1183 breakpoint = breakpoint->next;
1187 static int xscale_resume(struct target *target, int current,
1188 uint32_t address, int handle_breakpoints, int debug_execution)
1190 struct xscale_common *xscale = target_to_xscale(target);
1191 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1192 struct breakpoint *breakpoint = target->breakpoints;
1193 uint32_t current_pc;
1194 int retval;
1195 int i;
1197 LOG_DEBUG("-");
1199 if (target->state != TARGET_HALTED)
1201 LOG_WARNING("target not halted");
1202 return ERROR_TARGET_NOT_HALTED;
1205 if (!debug_execution)
1207 target_free_all_working_areas(target);
1210 /* update vector tables */
1211 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1212 return retval;
1214 /* current = 1: continue on current pc, otherwise continue at <address> */
1215 if (!current)
1216 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1218 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1220 /* if we're at the reset vector, we have to simulate the branch */
1221 if (current_pc == 0x0)
1223 arm_simulate_step(target, NULL);
1224 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1227 /* the front-end may request us not to handle breakpoints */
1228 if (handle_breakpoints)
1230 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1232 uint32_t next_pc;
1234 /* there's a breakpoint at the current PC, we have to step over it */
1235 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1236 xscale_unset_breakpoint(target, breakpoint);
1238 /* calculate PC of next instruction */
1239 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1241 uint32_t current_opcode;
1242 target_read_u32(target, current_pc, &current_opcode);
1243 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1246 LOG_DEBUG("enable single-step");
1247 xscale_enable_single_step(target, next_pc);
1249 /* restore banked registers */
1250 xscale_restore_context(target);
1252 /* send resume request (command 0x30 or 0x31)
1253 * clean the trace buffer if it is to be enabled (0x62) */
1254 if (xscale->trace.buffer_enabled)
1256 xscale_send_u32(target, 0x62);
1257 xscale_send_u32(target, 0x31);
1259 else
1260 xscale_send_u32(target, 0x30);
1262 /* send CPSR */
1263 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1264 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1266 for (i = 7; i >= 0; i--)
1268 /* send register */
1269 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1270 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1273 /* send PC */
1274 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1275 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1277 /* wait for and process debug entry */
1278 xscale_debug_entry(target);
1280 LOG_DEBUG("disable single-step");
1281 xscale_disable_single_step(target);
1283 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1284 xscale_set_breakpoint(target, breakpoint);
1288 /* enable any pending breakpoints and watchpoints */
1289 xscale_enable_breakpoints(target);
1290 xscale_enable_watchpoints(target);
1292 /* restore banked registers */
1293 xscale_restore_context(target);
1295 /* send resume request (command 0x30 or 0x31)
1296 * clean the trace buffer if it is to be enabled (0x62) */
1297 if (xscale->trace.buffer_enabled)
1299 xscale_send_u32(target, 0x62);
1300 xscale_send_u32(target, 0x31);
1302 else
1303 xscale_send_u32(target, 0x30);
1305 /* send CPSR */
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1307 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1309 for (i = 7; i >= 0; i--)
1311 /* send register */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1313 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1316 /* send PC */
1317 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1318 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1320 target->debug_reason = DBG_REASON_NOTHALTED;
1322 if (!debug_execution)
1324 /* registers are now invalid */
1325 armv4_5_invalidate_core_regs(target);
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1329 else
1331 target->state = TARGET_DEBUG_RUNNING;
1332 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1335 LOG_DEBUG("target resumed");
1337 return ERROR_OK;
1340 static int xscale_step_inner(struct target *target, int current,
1341 uint32_t address, int handle_breakpoints)
1343 struct xscale_common *xscale = target_to_xscale(target);
1344 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1345 uint32_t next_pc;
1346 int retval;
1347 int i;
1349 target->debug_reason = DBG_REASON_SINGLESTEP;
1351 /* calculate PC of next instruction */
1352 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1354 uint32_t current_opcode, current_pc;
1355 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1357 target_read_u32(target, current_pc, &current_opcode);
1358 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1359 return retval;
1362 LOG_DEBUG("enable single-step");
1363 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1364 return retval;
1366 /* restore banked registers */
1367 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1368 return retval;
1370 /* send resume request (command 0x30 or 0x31)
1371 * clean the trace buffer if it is to be enabled (0x62) */
1372 if (xscale->trace.buffer_enabled)
1374 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1375 return retval;
1376 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1377 return retval;
1379 else
1380 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1381 return retval;
1383 /* send CPSR */
1384 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1385 return retval;
1386 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1388 for (i = 7; i >= 0; i--)
1390 /* send register */
1391 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1396 /* send PC */
1397 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1398 return retval;
1399 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1401 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1403 /* registers are now invalid */
1404 if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
1405 return retval;
1407 /* wait for and process debug entry */
1408 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1409 return retval;
1411 LOG_DEBUG("disable single-step");
1412 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1413 return retval;
1415 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1417 return ERROR_OK;
1420 static int xscale_step(struct target *target, int current,
1421 uint32_t address, int handle_breakpoints)
1423 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1424 struct breakpoint *breakpoint = target->breakpoints;
1426 uint32_t current_pc;
1427 int retval;
1429 if (target->state != TARGET_HALTED)
1431 LOG_WARNING("target not halted");
1432 return ERROR_TARGET_NOT_HALTED;
1435 /* current = 1: continue on current pc, otherwise continue at <address> */
1436 if (!current)
1437 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1439 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1441 /* if we're at the reset vector, we have to simulate the step */
1442 if (current_pc == 0x0)
1444 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1445 return retval;
1446 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1448 target->debug_reason = DBG_REASON_SINGLESTEP;
1449 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1451 return ERROR_OK;
1454 /* the front-end may request us not to handle breakpoints */
1455 if (handle_breakpoints)
1456 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1458 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1459 return retval;
1462 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1464 if (breakpoint)
1466 xscale_set_breakpoint(target, breakpoint);
1469 LOG_DEBUG("target stepped");
1471 return ERROR_OK;
1475 static int xscale_assert_reset(struct target *target)
1477 struct xscale_common *xscale = target_to_xscale(target);
1479 LOG_DEBUG("target->state: %s",
1480 target_state_name(target));
1482 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1483 * end up in T-L-R, which would reset JTAG
1485 jtag_set_end_state(TAP_IDLE);
1486 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1488 /* set Hold reset, Halt mode and Trap Reset */
1489 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1490 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1491 xscale_write_dcsr(target, 1, 0);
1493 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1494 xscale_jtag_set_instr(target->tap, 0x7f);
1495 jtag_execute_queue();
1497 /* assert reset */
1498 jtag_add_reset(0, 1);
1500 /* sleep 1ms, to be sure we fulfill any requirements */
1501 jtag_add_sleep(1000);
1502 jtag_execute_queue();
1504 target->state = TARGET_RESET;
1506 if (target->reset_halt)
1508 int retval;
1509 if ((retval = target_halt(target)) != ERROR_OK)
1510 return retval;
1513 return ERROR_OK;
1516 static int xscale_deassert_reset(struct target *target)
1518 struct xscale_common *xscale = target_to_xscale(target);
1519 struct breakpoint *breakpoint = target->breakpoints;
1521 LOG_DEBUG("-");
1523 xscale->ibcr_available = 2;
1524 xscale->ibcr0_used = 0;
1525 xscale->ibcr1_used = 0;
1527 xscale->dbr_available = 2;
1528 xscale->dbr0_used = 0;
1529 xscale->dbr1_used = 0;
1531 /* mark all hardware breakpoints as unset */
1532 while (breakpoint)
1534 if (breakpoint->type == BKPT_HARD)
1536 breakpoint->set = 0;
1538 breakpoint = breakpoint->next;
1541 armv4_5_invalidate_core_regs(target);
1543 /* FIXME mark hardware watchpoints got unset too. Also,
1544 * at least some of the XScale registers are invalid...
1548 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1549 * contents got invalidated. Safer to force that, so writing new
1550 * contents can't ever fail..
1553 uint32_t address;
1554 unsigned buf_cnt;
1555 const uint8_t *buffer = xscale_debug_handler;
1556 int retval;
1558 /* release SRST */
1559 jtag_add_reset(0, 0);
1561 /* wait 300ms; 150 and 100ms were not enough */
1562 jtag_add_sleep(300*1000);
1564 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1565 jtag_execute_queue();
1567 /* set Hold reset, Halt mode and Trap Reset */
1568 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1569 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1570 xscale_write_dcsr(target, 1, 0);
1572 /* Load the debug handler into the mini-icache. Since
1573 * it's using halt mode (not monitor mode), it runs in
1574 * "Special Debug State" for access to registers, memory,
1575 * coprocessors, trace data, etc.
1577 address = xscale->handler_address;
1578 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1579 binary_size > 0;
1580 binary_size -= buf_cnt, buffer += buf_cnt)
1582 uint32_t cache_line[8];
1583 unsigned i;
1585 buf_cnt = binary_size;
1586 if (buf_cnt > 32)
1587 buf_cnt = 32;
1589 for (i = 0; i < buf_cnt; i += 4)
1591 /* convert LE buffer to host-endian uint32_t */
1592 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1595 for (; i < 32; i += 4)
1597 cache_line[i / 4] = 0xe1a08008; /* pad with "mov r8, r8" */
1600 /* only load addresses other than the reset vectors */
1601 if ((address % 0x400) != 0x0)
1603 retval = xscale_load_ic(target, address,
1604 cache_line);
1605 if (retval != ERROR_OK)
1606 return retval;
1609 address += buf_cnt;
1612 retval = xscale_load_ic(target, 0x0,
1613 xscale->low_vectors);
1614 if (retval != ERROR_OK)
1615 return retval;
1616 retval = xscale_load_ic(target, 0xffff0000,
1617 xscale->high_vectors);
1618 if (retval != ERROR_OK)
1619 return retval;
1621 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1623 jtag_add_sleep(100000);
1625 /* set Hold reset, Halt mode and Trap Reset */
1626 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1627 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1628 xscale_write_dcsr(target, 1, 0);
1630 /* clear Hold reset to let the target run (should enter debug handler) */
1631 xscale_write_dcsr(target, 0, 1);
1632 target->state = TARGET_RUNNING;
1634 if (!target->reset_halt)
1636 jtag_add_sleep(10000);
1638 /* we should have entered debug now */
1639 xscale_debug_entry(target);
1640 target->state = TARGET_HALTED;
1642 /* resume the target */
1643 xscale_resume(target, 1, 0x0, 1, 0);
1647 return ERROR_OK;
1650 static int xscale_read_core_reg(struct target *target, int num,
1651 enum armv4_5_mode mode)
1653 LOG_ERROR("not implemented");
1654 return ERROR_OK;
1657 static int xscale_write_core_reg(struct target *target, int num,
1658 enum armv4_5_mode mode, uint32_t value)
1660 LOG_ERROR("not implemented");
1661 return ERROR_OK;
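/* Read every banked register (r8-r14, plus SPSR outside USR/SYS) for each
 * privileged mode whose cached copies are not valid, using debug handler
 * command 0x0 followed by a CPSR selecting the mode.
 */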
1664 static int xscale_full_context(struct target *target)
1666 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1668 uint32_t *buffer;
1670 int i, j;
1672 LOG_DEBUG("-");
1674 if (target->state != TARGET_HALTED)
1676 LOG_WARNING("target not halted");
1677 return ERROR_TARGET_NOT_HALTED;
1680 buffer = malloc(4 * 8);
1682 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1683 * we can't enter User mode on an XScale (unpredictable),
1684 * but User shares registers with SYS
1686 for (i = 1; i < 7; i++)
1688 int valid = 1;
1690 /* check if there are invalid registers in the current mode
1692 for (j = 0; j <= 16; j++)
1694 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1695 valid = 0;
1698 if (!valid)
1700 uint32_t tmp_cpsr;
1702 /* request banked registers */
1703 xscale_send_u32(target, 0x0);
1705 tmp_cpsr = 0x0;
1706 tmp_cpsr |= armv4_5_number_to_mode(i);
1707 tmp_cpsr |= 0xc0; /* I/F bits */
1709 /* send CPSR for desired mode */
1710 xscale_send_u32(target, tmp_cpsr);
1712 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1713 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1715 xscale_receive(target, buffer, 8);
1716 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1717 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1718 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1720 else
1722 xscale_receive(target, buffer, 7);
1725 /* move data from buffer to register cache */
1726 for (j = 8; j <= 14; j++)
1728 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1729 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1730 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1735 free(buffer);
1737 return ERROR_OK;
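/* Write back any dirty banked registers (and SPSR) per mode, using debug
 * handler command 0x1 followed by a CPSR selecting the mode.
 */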
1740 static int xscale_restore_context(struct target *target)
1742 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1744 int i, j;
1746 if (target->state != TARGET_HALTED)
1748 LOG_WARNING("target not halted");
1749 return ERROR_TARGET_NOT_HALTED;
1752 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1753 * we can't enter User mode on an XScale (unpredictable),
1754 * but User shares registers with SYS
1756 for (i = 1; i < 7; i++)
1758 int dirty = 0;
1760 /* check if there are invalid registers in the current mode
1762 for (j = 8; j <= 14; j++)
1764 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1765 dirty = 1;
1768 /* if not USR/SYS, check if the SPSR needs to be written */
1769 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1771 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1772 dirty = 1;
1775 if (dirty)
1777 uint32_t tmp_cpsr;
1779 /* send banked registers */
1780 xscale_send_u32(target, 0x1);
1782 tmp_cpsr = 0x0;
1783 tmp_cpsr |= armv4_5_number_to_mode(i);
1784 tmp_cpsr |= 0xc0; /* I/F bits */
1786 /* send CPSR for desired mode */
1787 xscale_send_u32(target, tmp_cpsr);
1789 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1790 for (j = 8; j <= 14; j++)
1792 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1793 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1796 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1798 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1799 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1804 return ERROR_OK;
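/* Memory read through the debug handler: command 0x10 | size, then the base
 * address and word count; data comes back as 32-bit words, and the DCSR
 * sticky-abort bit is checked afterwards to detect faulting accesses.
 */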
1807 static int xscale_read_memory(struct target *target, uint32_t address,
1808 uint32_t size, uint32_t count, uint8_t *buffer)
1810 struct xscale_common *xscale = target_to_xscale(target);
1811 uint32_t *buf32;
1812 uint32_t i;
1813 int retval;
1815 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1817 if (target->state != TARGET_HALTED)
1819 LOG_WARNING("target not halted");
1820 return ERROR_TARGET_NOT_HALTED;
1823 /* sanitize arguments */
1824 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1825 return ERROR_INVALID_ARGUMENTS;
1827 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1828 return ERROR_TARGET_UNALIGNED_ACCESS;
1830 /* send memory read request (command 0x1n, n: access size) */
1831 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1832 return retval;
1834 /* send base address for read request */
1835 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1836 return retval;
1838 /* send number of requested data words */
1839 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1840 return retval;
1842 /* receive data from target (count times 32-bit words in host endianness) */
1843 buf32 = malloc(4 * count);
1844 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1845 return retval;
1847 /* extract data from host-endian buffer into byte stream */
1848 for (i = 0; i < count; i++)
1850 switch (size)
1852 case 4:
1853 target_buffer_set_u32(target, buffer, buf32[i]);
1854 buffer += 4;
1855 break;
1856 case 2:
1857 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1858 buffer += 2;
1859 break;
1860 case 1:
1861 *buffer++ = buf32[i] & 0xff;
1862 break;
1863 default:
1864 LOG_ERROR("invalid read size");
1865 return ERROR_INVALID_ARGUMENTS;
1869 free(buf32);
1871 /* examine DCSR, to see if Sticky Abort (SA) got set */
1872 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1873 return retval;
1874 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1876 /* clear SA bit */
1877 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1878 return retval;
1880 return ERROR_TARGET_DATA_ABORT;
1883 return ERROR_OK;
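/* Memory write through the debug handler: command 0x20 | size, base address
 * and count, followed by the raw data; the DCSR sticky-abort bit is checked
 * afterwards to detect faulting accesses.
 */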
1886 static int xscale_write_memory(struct target *target, uint32_t address,
1887 uint32_t size, uint32_t count, uint8_t *buffer)
1889 struct xscale_common *xscale = target_to_xscale(target);
1890 int retval;
1892 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1894 if (target->state != TARGET_HALTED)
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1900 /* sanitize arguments */
1901 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1902 return ERROR_INVALID_ARGUMENTS;
1904 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1905 return ERROR_TARGET_UNALIGNED_ACCESS;
1907 /* send memory write request (command 0x2n, n: access size) */
1908 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1909 return retval;
1911 /* send base address for write request */
1912 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1913 return retval;
1915 /* send number of requested data words to be written */
1916 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1917 return retval;
1919 /* extract data from host-endian buffer into byte stream */
1920 #if 0
1921 for (i = 0; i < count; i++)
1923 switch (size)
1925 case 4:
1926 value = target_buffer_get_u32(target, buffer);
1927 xscale_send_u32(target, value);
1928 buffer += 4;
1929 break;
1930 case 2:
1931 value = target_buffer_get_u16(target, buffer);
1932 xscale_send_u32(target, value);
1933 buffer += 2;
1934 break;
1935 case 1:
1936 value = *buffer;
1937 xscale_send_u32(target, value);
1938 buffer += 1;
1939 break;
1940 default:
1941 LOG_ERROR("should never get here");
1942 exit(-1);
1945 #endif
1946 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1947 return retval;
1949 /* examine DCSR, to see if Sticky Abort (SA) got set */
1950 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1951 return retval;
1952 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1954 /* clear SA bit */
1955 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1956 return retval;
1958 return ERROR_TARGET_DATA_ABORT;
1961 return ERROR_OK;
1964 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1965 uint32_t count, uint8_t *buffer)
1967 return xscale_write_memory(target, address, 4, count, buffer);
1970 static uint32_t xscale_get_ttb(struct target *target)
1972 struct xscale_common *xscale = target_to_xscale(target);
1973 uint32_t ttb;
1975 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1976 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1978 return ttb;
1981 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1982 int d_u_cache, int i_cache)
1984 struct xscale_common *xscale = target_to_xscale(target);
1985 uint32_t cp15_control;
1987 /* read cp15 control register */
1988 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1989 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1991 if (mmu)
1992 cp15_control &= ~0x1U;
1994 if (d_u_cache)
1996 /* clean DCache */
1997 xscale_send_u32(target, 0x50);
1998 xscale_send_u32(target, xscale->cache_clean_address);
2000 /* invalidate DCache */
2001 xscale_send_u32(target, 0x51);
2003 cp15_control &= ~0x4U;
2006 if (i_cache)
2008 /* invalidate ICache */
2009 xscale_send_u32(target, 0x52);
2010 cp15_control &= ~0x1000U;
2013 /* write new cp15 control register */
2014 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2016 /* execute cpwait to ensure outstanding operations complete */
2017 xscale_send_u32(target, 0x53);
2020 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2021 int d_u_cache, int i_cache)
2023 struct xscale_common *xscale = target_to_xscale(target);
2024 uint32_t cp15_control;
2026 /* read cp15 control register */
2027 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2028 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2030 if (mmu)
2031 cp15_control |= 0x1U;
2033 if (d_u_cache)
2034 cp15_control |= 0x4U;
2036 if (i_cache)
2037 cp15_control |= 0x1000U;
2039 /* write new cp15 control register */
2040 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2042 /* execute cpwait to ensure outstanding operations complete */
2043 xscale_send_u32(target, 0x53);
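/* The cache/MMU helpers above only touch three bits of the CP15 control
 * register, and use debug handler command 0x53 (CPWAIT) to make sure the
 * change has taken effect before execution continues.  For reference, the
 * raw masks used above could be named like this (names are illustrative,
 * the code uses the literal masks):
 *
 *   0x0001   bit 0:  MMU enable
 *   0x0004   bit 2:  data/unified cache enable
 *   0x1000   bit 12: instruction cache enable
 */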
2046 static int xscale_set_breakpoint(struct target *target,
2047 struct breakpoint *breakpoint)
2049 int retval;
2050 struct xscale_common *xscale = target_to_xscale(target);
2052 if (target->state != TARGET_HALTED)
2054 LOG_WARNING("target not halted");
2055 return ERROR_TARGET_NOT_HALTED;
2058 if (breakpoint->set)
2060 LOG_WARNING("breakpoint already set");
2061 return ERROR_OK;
2064 if (breakpoint->type == BKPT_HARD)
2066 uint32_t value = breakpoint->address | 1;
2067 if (!xscale->ibcr0_used)
2069 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2070 xscale->ibcr0_used = 1;
2071 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2073 else if (!xscale->ibcr1_used)
2075 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2076 xscale->ibcr1_used = 1;
2077 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2079 else
2081 LOG_ERROR("BUG: no hardware comparator available");
2082 return ERROR_OK;
2085 else if (breakpoint->type == BKPT_SOFT)
2087 if (breakpoint->length == 4)
2089 /* keep the original instruction in target endianness */
2090 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2092 return retval;
2094 /* write the BKPT instruction in target endianness (xscale->arm_bkpt is host endian) */
2095 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2097 return retval;
2100 else
2102 /* keep the original instruction in target endianness */
2103 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2105 return retval;
2107 /* write the BKPT instruction in target endianness (xscale->thumb_bkpt is host endian) */
2108 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2110 return retval;
2113 breakpoint->set = 1;
2116 return ERROR_OK;
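/* Hardware breakpoints: the XScale core provides two instruction breakpoint
 * registers, IBCR0 and IBCR1.  As used above, each holds the breakpoint
 * address with bit 0 set as the enable flag, e.g. (illustrative only):
 *
 *   xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0],
 *                      breakpoint->address | 1);   // address + enable bit
 *
 * breakpoint->set remembers which comparator (1 or 2) was used so it can be
 * released again in xscale_unset_breakpoint().
 */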
2119 static int xscale_add_breakpoint(struct target *target,
2120 struct breakpoint *breakpoint)
2122 struct xscale_common *xscale = target_to_xscale(target);
2124 if (target->state != TARGET_HALTED)
2126 LOG_WARNING("target not halted");
2127 return ERROR_TARGET_NOT_HALTED;
2130 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2132 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2133 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2136 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2138 LOG_INFO("only breakpoints two (Thumb) or four (ARM) bytes in length are supported");
2139 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2142 if (breakpoint->type == BKPT_HARD)
2144 xscale->ibcr_available--;
2147 return ERROR_OK;
2150 static int xscale_unset_breakpoint(struct target *target,
2151 struct breakpoint *breakpoint)
2153 int retval;
2154 struct xscale_common *xscale = target_to_xscale(target);
2156 if (target->state != TARGET_HALTED)
2158 LOG_WARNING("target not halted");
2159 return ERROR_TARGET_NOT_HALTED;
2162 if (!breakpoint->set)
2164 LOG_WARNING("breakpoint not set");
2165 return ERROR_OK;
2168 if (breakpoint->type == BKPT_HARD)
2170 if (breakpoint->set == 1)
2172 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2173 xscale->ibcr0_used = 0;
2175 else if (breakpoint->set == 2)
2177 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2178 xscale->ibcr1_used = 0;
2180 breakpoint->set = 0;
2182 else
2184 /* restore original instruction (kept in target endianness) */
2185 if (breakpoint->length == 4)
2187 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2189 return retval;
2192 else
2194 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2196 return retval;
2199 breakpoint->set = 0;
2202 return ERROR_OK;
2205 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2207 struct xscale_common *xscale = target_to_xscale(target);
2209 if (target->state != TARGET_HALTED)
2211 LOG_WARNING("target not halted");
2212 return ERROR_TARGET_NOT_HALTED;
2215 if (breakpoint->set)
2217 xscale_unset_breakpoint(target, breakpoint);
2220 if (breakpoint->type == BKPT_HARD)
2221 xscale->ibcr_available++;
2223 return ERROR_OK;
2226 static int xscale_set_watchpoint(struct target *target,
2227 struct watchpoint *watchpoint)
2229 struct xscale_common *xscale = target_to_xscale(target);
2230 uint8_t enable = 0;
2231 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2232 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2234 if (target->state != TARGET_HALTED)
2236 LOG_WARNING("target not halted");
2237 return ERROR_TARGET_NOT_HALTED;
2240 xscale_get_reg(dbcon);
2242 switch (watchpoint->rw)
2244 case WPT_READ:
2245 enable = 0x3;
2246 break;
2247 case WPT_ACCESS:
2248 enable = 0x2;
2249 break;
2250 case WPT_WRITE:
2251 enable = 0x1;
2252 break;
2253 default:
2254 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2257 if (!xscale->dbr0_used)
2259 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2260 dbcon_value |= enable;
2261 xscale_set_reg_u32(dbcon, dbcon_value);
2262 watchpoint->set = 1;
2263 xscale->dbr0_used = 1;
2265 else if (!xscale->dbr1_used)
2267 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2268 dbcon_value |= enable << 2;
2269 xscale_set_reg_u32(dbcon, dbcon_value);
2270 watchpoint->set = 2;
2271 xscale->dbr1_used = 1;
2273 else
2275 LOG_ERROR("BUG: no hardware comparator available");
2276 return ERROR_OK;
2279 return ERROR_OK;
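/* Watchpoints: the two data breakpoint registers DBR0/DBR1 hold the watched
 * address, while DBCON holds a 2-bit enable/mode field per comparator
 * (0x1 = store only, 0x2 = any access, 0x3 = load only, as selected above).
 * DBR1's field sits two bits higher, so arming DBR1 for stores would look
 * like this (illustrative sketch only):
 *
 *   dbcon_value |= 0x1 << 2;                     // DBR1, store-only mode
 *   xscale_set_reg_u32(dbcon, dbcon_value);
 *   xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
 *                      watchpoint->address);
 */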
2282 static int xscale_add_watchpoint(struct target *target,
2283 struct watchpoint *watchpoint)
2285 struct xscale_common *xscale = target_to_xscale(target);
2287 if (target->state != TARGET_HALTED)
2289 LOG_WARNING("target not halted");
2290 return ERROR_TARGET_NOT_HALTED;
2293 if (xscale->dbr_available < 1)
2295 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2298 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2300 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303 xscale->dbr_available--;
2305 return ERROR_OK;
2308 static int xscale_unset_watchpoint(struct target *target,
2309 struct watchpoint *watchpoint)
2311 struct xscale_common *xscale = target_to_xscale(target);
2312 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2313 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2315 if (target->state != TARGET_HALTED)
2317 LOG_WARNING("target not halted");
2318 return ERROR_TARGET_NOT_HALTED;
2321 if (!watchpoint->set)
2323 LOG_WARNING("watchpoint not set");
2324 return ERROR_OK;
2327 if (watchpoint->set == 1)
2329 dbcon_value &= ~0x3;
2330 xscale_set_reg_u32(dbcon, dbcon_value);
2331 xscale->dbr0_used = 0;
2333 else if (watchpoint->set == 2)
2335 dbcon_value &= ~0xc;
2336 xscale_set_reg_u32(dbcon, dbcon_value);
2337 xscale->dbr1_used = 0;
2339 watchpoint->set = 0;
2341 return ERROR_OK;
2344 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2346 struct xscale_common *xscale = target_to_xscale(target);
2348 if (target->state != TARGET_HALTED)
2350 LOG_WARNING("target not halted");
2351 return ERROR_TARGET_NOT_HALTED;
2354 if (watchpoint->set)
2356 xscale_unset_watchpoint(target, watchpoint);
2359 xscale->dbr_available++;
2361 return ERROR_OK;
2364 static int xscale_get_reg(struct reg *reg)
2366 struct xscale_reg *arch_info = reg->arch_info;
2367 struct target *target = arch_info->target;
2368 struct xscale_common *xscale = target_to_xscale(target);
2370 /* DCSR, TX and RX are accessible via JTAG */
2371 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2373 return xscale_read_dcsr(arch_info->target);
2375 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2377 /* 1 = consume register content */
2378 return xscale_read_tx(arch_info->target, 1);
2380 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2382 /* can't read from RX register (host -> debug handler) */
2383 return ERROR_OK;
2385 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2387 /* can't (explicitly) read from TXRXCTRL register */
2388 return ERROR_OK;
2390 else /* Other DBG registers have to be transferred by the debug handler */
2392 /* send CP read request (command 0x40) */
2393 xscale_send_u32(target, 0x40);
2395 /* send CP register number */
2396 xscale_send_u32(target, arch_info->dbg_handler_number);
2398 /* read register value */
2399 xscale_read_tx(target, 1);
2400 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2402 reg->dirty = 0;
2403 reg->valid = 1;
2406 return ERROR_OK;
2409 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2411 struct xscale_reg *arch_info = reg->arch_info;
2412 struct target *target = arch_info->target;
2413 struct xscale_common *xscale = target_to_xscale(target);
2414 uint32_t value = buf_get_u32(buf, 0, 32);
2416 /* DCSR, TX and RX are accessible via JTAG */
2417 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2419 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2420 return xscale_write_dcsr(arch_info->target, -1, -1);
2422 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2424 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2425 return xscale_write_rx(arch_info->target);
2427 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2429 /* can't write to TX register (debug-handler -> host) */
2430 return ERROR_OK;
2432 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2434 /* can't (explicitly) write to TXRXCTRL register */
2435 return ERROR_OK;
2437 else /* Other DBG registers have to be transferred by the debug handler */
2439 /* send CP write request (command 0x41) */
2440 xscale_send_u32(target, 0x41);
2442 /* send CP register number */
2443 xscale_send_u32(target, arch_info->dbg_handler_number);
2445 /* send CP register value */
2446 xscale_send_u32(target, value);
2447 buf_set_u32(reg->value, 0, 32, value);
2450 return ERROR_OK;
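/* Registers that are not directly reachable over JTAG (everything except
 * DCSR, TX and RX) are moved through the debug handler with the small
 * command protocol used by xscale_get_reg()/xscale_set_reg() above
 * (illustrative sketch, error handling omitted):
 *
 *   // read:  0x40 + register number; the value comes back via TX
 *   xscale_send_u32(target, 0x40);
 *   xscale_send_u32(target, arch_info->dbg_handler_number);
 *   xscale_read_tx(target, 1);
 *
 *   // write: 0x41 + register number + new value
 *   xscale_send_u32(target, 0x41);
 *   xscale_send_u32(target, arch_info->dbg_handler_number);
 *   xscale_send_u32(target, value);
 */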
2453 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2455 struct xscale_common *xscale = target_to_xscale(target);
2456 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2457 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2459 /* send CP write request (command 0x41) */
2460 xscale_send_u32(target, 0x41);
2462 /* send CP register number */
2463 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2465 /* send CP register value */
2466 xscale_send_u32(target, value);
2467 buf_set_u32(dcsr->value, 0, 32, value);
2469 return ERROR_OK;
2472 static int xscale_read_trace(struct target *target)
2474 struct xscale_common *xscale = target_to_xscale(target);
2475 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2476 struct xscale_trace_data **trace_data_p;
2478 /* 258 words from debug handler
2479 * 256 trace buffer entries
2480 * 2 checkpoint addresses
2482 uint32_t trace_buffer[258];
2483 int is_address[256];
2484 int i, j;
2486 if (target->state != TARGET_HALTED)
2488 LOG_WARNING("target must be stopped to read trace data");
2489 return ERROR_TARGET_NOT_HALTED;
2492 /* send read trace buffer command (command 0x61) */
2493 xscale_send_u32(target, 0x61);
2495 /* receive trace buffer content */
2496 xscale_receive(target, trace_buffer, 258);
2498 /* parse buffer backwards to identify address entries */
2499 for (i = 255; i >= 0; i--)
2501 is_address[i] = 0;
2502 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2503 ((trace_buffer[i] & 0xf0) == 0xd0))
2505 if (i > 0)
2506 is_address[--i] = 1;
2507 if (i > 0)
2508 is_address[--i] = 1;
2509 if (i > 0)
2510 is_address[--i] = 1;
2511 if (i > 0)
2512 is_address[--i] = 1;
2517 /* search first non-zero entry */
2518 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2521 if (j == 256)
2523 LOG_DEBUG("no trace data collected");
2524 return ERROR_XSCALE_NO_TRACE_DATA;
2527 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2530 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2531 (*trace_data_p)->next = NULL;
2532 (*trace_data_p)->chkpt0 = trace_buffer[256];
2533 (*trace_data_p)->chkpt1 = trace_buffer[257];
2534 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2535 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2536 (*trace_data_p)->depth = 256 - j;
2538 for (i = j; i < 256; i++)
2540 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2541 if (is_address[i])
2542 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2543 else
2544 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2547 return ERROR_OK;
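/* Layout of the 258 words returned by the 0x61 "read trace buffer" command,
 * as parsed above: words 0..255 are the trace entries, word 256 is
 * checkpoint register 0 and word 257 is checkpoint register 1.  An entry
 * whose upper nibble is 0x9 or 0xd is an indirect-branch message, and the
 * four entries in front of it carry the branch target address one byte at a
 * time; those are tagged XSCALE_TRACE_ADDRESS, everything else becomes
 * XSCALE_TRACE_MESSAGE.  Mapping into the allocated record (illustrative):
 *
 *   trace_buffer[j..255]  ->  (*trace_data_p)->entries[0..depth-1]
 *   trace_buffer[256]     ->  (*trace_data_p)->chkpt0
 *   trace_buffer[257]     ->  (*trace_data_p)->chkpt1
 */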
2550 static int xscale_read_instruction(struct target *target,
2551 struct arm_instruction *instruction)
2553 struct xscale_common *xscale = target_to_xscale(target);
2554 int i;
2555 int section = -1;
2556 size_t size_read;
2557 uint32_t opcode;
2558 int retval;
2560 if (!xscale->trace.image)
2561 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2563 /* search for the section the current instruction belongs to */
2564 for (i = 0; i < xscale->trace.image->num_sections; i++)
2566 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2567 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2569 section = i;
2570 break;
2574 if (section == -1)
2576 /* current instruction couldn't be found in the image */
2577 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2580 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2582 uint8_t buf[4];
2583 if ((retval = image_read_section(xscale->trace.image, section,
2584 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2585 4, buf, &size_read)) != ERROR_OK)
2587 LOG_ERROR("error while reading instruction: %i", retval);
2588 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2590 opcode = target_buffer_get_u32(target, buf);
2591 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2593 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2595 uint8_t buf[2];
2596 if ((retval = image_read_section(xscale->trace.image, section,
2597 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2598 2, buf, &size_read)) != ERROR_OK)
2600 LOG_ERROR("error while reading instruction: %i", retval);
2601 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2603 opcode = target_buffer_get_u16(target, buf);
2604 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2606 else
2608 LOG_ERROR("BUG: unknown core state encountered");
2609 exit(-1);
2612 return ERROR_OK;
2615 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2616 int i, uint32_t *target)
2618 /* if there are less than four entries prior to the indirect branch message
2619 * we can't extract the address */
2620 if (i < 4)
2622 return -1;
2625 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2626 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2628 return 0;
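/* The four XSCALE_TRACE_ADDRESS entries preceding an indirect-branch message
 * carry the branch target least-significant byte first (the entry closest to
 * the message is the low byte).  Worked example, using illustrative values
 * entries[i-4..i-1] = { 0xa0, 0x00, 0x10, 0x34 }:
 *
 *   *target = 0x34 | (0x10 << 8) | (0x00 << 16) | (0xa0 << 24) = 0xa0001034
 */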
2631 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2633 struct xscale_common *xscale = target_to_xscale(target);
2634 int next_pc_ok = 0;
2635 uint32_t next_pc = 0x0;
2636 struct xscale_trace_data *trace_data = xscale->trace.data;
2637 int retval;
2639 while (trace_data)
2641 int i, chkpt;
2642 int rollover;
2643 int branch;
2644 int exception;
2645 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2647 chkpt = 0;
2648 rollover = 0;
2650 for (i = 0; i < trace_data->depth; i++)
2652 next_pc_ok = 0;
2653 branch = 0;
2654 exception = 0;
2656 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2657 continue;
2659 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2661 case 0: /* Exceptions */
2662 case 1:
2663 case 2:
2664 case 3:
2665 case 4:
2666 case 5:
2667 case 6:
2668 case 7:
2669 exception = (trace_data->entries[i].data & 0x70) >> 4;
2670 next_pc_ok = 1;
2671 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2672 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2673 break;
2674 case 8: /* Direct Branch */
2675 branch = 1;
2676 break;
2677 case 9: /* Indirect Branch */
2678 branch = 1;
2679 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2681 next_pc_ok = 1;
2683 break;
2684 case 13: /* Checkpointed Indirect Branch */
2685 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2687 next_pc_ok = 1;
2688 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2689 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2690 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2692 /* explicit fall-through */
2693 case 12: /* Checkpointed Direct Branch */
2694 branch = 1;
2695 if (chkpt == 0)
2697 next_pc_ok = 1;
2698 next_pc = trace_data->chkpt0;
2699 chkpt++;
2701 else if (chkpt == 1)
2703 next_pc_ok = 1;
2704 next_pc = trace_data->chkpt1;
2705 chkpt++;
2707 else
2709 LOG_WARNING("more than two checkpointed branches encountered");
2711 break;
2712 case 15: /* Roll-over */
2713 rollover++;
2714 continue;
2715 default: /* Reserved */
2716 command_print(cmd_ctx, "--- reserved trace message ---");
2717 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2718 return ERROR_OK;
2721 if (xscale->trace.pc_ok)
2723 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2724 struct arm_instruction instruction;
2726 if ((exception == 6) || (exception == 7))
2728 /* IRQ or FIQ exception, no instruction executed */
2729 executed -= 1;
2732 while (executed-- >= 0)
2734 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2736 /* can't continue tracing with no image available */
2737 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2739 return retval;
2741 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2743 /* TODO: handle incomplete images */
2747 /* a precise abort on a load to the PC is included in the incremental
2748 * word count, other instructions causing data aborts are not included
2750 if ((executed == 0) && (exception == 4)
2751 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2753 if ((instruction.type == ARM_LDM)
2754 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2756 executed--;
2758 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2759 && (instruction.info.load_store.Rd != 15))
2761 executed--;
2765 /* only the last instruction executed
2766 * (the one that caused the control flow change)
2767 * could be a taken branch
2769 if (((executed == -1) && (branch == 1)) &&
2770 (((instruction.type == ARM_B) ||
2771 (instruction.type == ARM_BL) ||
2772 (instruction.type == ARM_BLX)) &&
2773 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2775 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2777 else
2779 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2781 command_print(cmd_ctx, "%s", instruction.text);
2784 rollover = 0;
2787 if (next_pc_ok)
2789 xscale->trace.current_pc = next_pc;
2790 xscale->trace.pc_ok = 1;
2794 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2796 struct arm_instruction instruction;
2797 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2799 /* can't continue tracing with no image available */
2800 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2802 return retval;
2804 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2806 /* TODO: handle incomplete images */
2809 command_print(cmd_ctx, "%s", instruction.text);
2812 trace_data = trace_data->next;
2815 return ERROR_OK;
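/* Trace message format, as decoded above: the upper nibble of each message
 * byte selects the message type, the lower nibble counts the instructions
 * executed since the previous message (each 0xfn roll-over message adds
 * another 16 to that count).  Types handled here (illustrative summary):
 *
 *   0x0n-0x7n  exception n taken
 *   0x8n       direct branch
 *   0x9n       indirect branch (target in the preceding 4 address entries)
 *   0xcn       checkpointed direct branch (target from chkpt0/chkpt1)
 *   0xdn       checkpointed indirect branch
 *   0xfn       roll-over
 */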
2818 static const struct reg_arch_type xscale_reg_type = {
2819 .get = xscale_get_reg,
2820 .set = xscale_set_reg,
2823 static void xscale_build_reg_cache(struct target *target)
2825 struct xscale_common *xscale = target_to_xscale(target);
2826 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2827 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2828 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2829 int i;
2830 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2832 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2833 armv4_5->core_cache = (*cache_p);
2835 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2836 cache_p = &(*cache_p)->next;
2838 /* fill in values for the xscale reg cache */
2839 (*cache_p)->name = "XScale registers";
2840 (*cache_p)->next = NULL;
2841 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2842 (*cache_p)->num_regs = num_regs;
2844 for (i = 0; i < num_regs; i++)
2846 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2847 (*cache_p)->reg_list[i].value = calloc(4, 1);
2848 (*cache_p)->reg_list[i].dirty = 0;
2849 (*cache_p)->reg_list[i].valid = 0;
2850 (*cache_p)->reg_list[i].size = 32;
2851 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2852 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2853 arch_info[i] = xscale_reg_arch_info[i];
2854 arch_info[i].target = target;
2857 xscale->reg_cache = (*cache_p);
2860 static int xscale_init_target(struct command_context *cmd_ctx,
2861 struct target *target)
2863 xscale_build_reg_cache(target);
2864 return ERROR_OK;
2867 static int xscale_init_arch_info(struct target *target,
2868 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2870 struct arm *armv4_5;
2871 uint32_t high_reset_branch, low_reset_branch;
2872 int i;
2874 armv4_5 = &xscale->armv4_5_common;
2876 /* store architecture specific data (none so far) */
2877 xscale->common_magic = XSCALE_COMMON_MAGIC;
2879 /* we don't really *need* variant info ... */
2880 if (variant) {
2881 int ir_length = 0;
2883 if (strcmp(variant, "pxa250") == 0
2884 || strcmp(variant, "pxa255") == 0
2885 || strcmp(variant, "pxa26x") == 0)
2886 ir_length = 5;
2887 else if (strcmp(variant, "pxa27x") == 0
2888 || strcmp(variant, "ixp42x") == 0
2889 || strcmp(variant, "ixp45x") == 0
2890 || strcmp(variant, "ixp46x") == 0)
2891 ir_length = 7;
2892 else
2893 LOG_WARNING("%s: unrecognized variant %s",
2894 tap->dotted_name, variant);
2896 if (ir_length && ir_length != tap->ir_length) {
2897 LOG_WARNING("%s: IR length for %s is %d; fixing",
2898 tap->dotted_name, variant, ir_length);
2899 tap->ir_length = ir_length;
2903 /* the debug handler isn't installed (and thus not running) at this time */
2904 xscale->handler_address = 0xfe000800;
2906 /* clear the vectors we keep locally for reference */
2907 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2908 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2910 /* no user-specified vectors have been configured yet */
2911 xscale->static_low_vectors_set = 0x0;
2912 xscale->static_high_vectors_set = 0x0;
2914 /* calculate branches to debug handler */
2915 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2916 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2918 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2919 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2921 for (i = 1; i <= 7; i++)
2923 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2924 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
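/* Worked example of the reset-vector branches computed above, using the
 * default handler_address of 0xfe000800:
 *
 *   low_reset_branch  = (0xfe000800 + 0x20 - 0x0        - 0x8) >> 2 = 0x3f800206
 *   high_reset_branch = (0xfe000800 + 0x20 - 0xffff0000 - 0x8) >> 2 = 0x3f804206
 *
 * Only the low 24 bits (0x800206 / 0x804206) end up in the B instruction
 * built by ARMV4_5_B(), giving a PC-relative branch from the low or high
 * reset vector to handler_address + 0x20.  The remaining vectors get
 * ARMV4_5_B(0xfffffe, 0), a branch-to-self (-2 words relative to PC + 8).
 */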
2927 /* 64kB aligned region used for DCache cleaning */
2928 xscale->cache_clean_address = 0xfffe0000;
2930 xscale->hold_rst = 0;
2931 xscale->external_debug_break = 0;
2933 xscale->ibcr_available = 2;
2934 xscale->ibcr0_used = 0;
2935 xscale->ibcr1_used = 0;
2937 xscale->dbr_available = 2;
2938 xscale->dbr0_used = 0;
2939 xscale->dbr1_used = 0;
2941 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2942 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2944 xscale->vector_catch = 0x1;
2946 xscale->trace.capture_status = TRACE_IDLE;
2947 xscale->trace.data = NULL;
2948 xscale->trace.image = NULL;
2949 xscale->trace.buffer_enabled = 0;
2950 xscale->trace.buffer_fill = 0;
2952 /* prepare ARMv4/5 specific information */
2953 armv4_5->arch_info = xscale;
2954 armv4_5->read_core_reg = xscale_read_core_reg;
2955 armv4_5->write_core_reg = xscale_write_core_reg;
2956 armv4_5->full_context = xscale_full_context;
2958 armv4_5_init_arch_info(target, armv4_5);
2960 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2961 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2962 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2963 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2964 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2965 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2966 xscale->armv4_5_mmu.has_tiny_pages = 1;
2967 xscale->armv4_5_mmu.mmu_enabled = 0;
2969 return ERROR_OK;
2972 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2974 struct xscale_common *xscale;
2976 if (sizeof xscale_debug_handler - 1 > 0x800) {
2977 LOG_ERROR("debug_handler.bin: larger than 2kb");
2978 return ERROR_FAIL;
2981 xscale = calloc(1, sizeof(*xscale));
2982 if (!xscale)
2983 return ERROR_FAIL;
2985 return xscale_init_arch_info(target, xscale, target->tap,
2986 target->variant);
2989 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2991 struct target *target = NULL;
2992 struct xscale_common *xscale;
2993 int retval;
2994 uint32_t handler_address;
2996 if (CMD_ARGC < 2)
2998 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
2999 return ERROR_OK;
3002 if ((target = get_target(CMD_ARGV[0])) == NULL)
3004 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3005 return ERROR_FAIL;
3008 xscale = target_to_xscale(target);
3009 retval = xscale_verify_pointer(CMD_CTX, xscale);
3010 if (retval != ERROR_OK)
3011 return retval;
3013 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3015 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3016 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3018 xscale->handler_address = handler_address;
3020 else
3022 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3023 return ERROR_FAIL;
3026 return ERROR_OK;
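/* Typical invocation from a target configuration, assuming a target numbered
 * 0 and the default handler location (illustrative):
 *
 *   xscale debug_handler 0 0xfe000800
 *
 * The address must fall in one of the two windows checked above,
 * 0x800..0x1fef800 or 0xfe000800..0xfffff800.
 */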
3029 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3031 struct target *target = NULL;
3032 struct xscale_common *xscale;
3033 int retval;
3034 uint32_t cache_clean_address;
3036 if (CMD_ARGC < 2)
3038 return ERROR_COMMAND_SYNTAX_ERROR;
3041 target = get_target(CMD_ARGV[0]);
3042 if (target == NULL)
3044 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3045 return ERROR_FAIL;
3047 xscale = target_to_xscale(target);
3048 retval = xscale_verify_pointer(CMD_CTX, xscale);
3049 if (retval != ERROR_OK)
3050 return retval;
3052 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3054 if (cache_clean_address & 0xffff)
3056 LOG_ERROR("xscale cache_clean_address <address> must be 64kB aligned");
3058 else
3060 xscale->cache_clean_address = cache_clean_address;
3063 return ERROR_OK;
3066 COMMAND_HANDLER(xscale_handle_cache_info_command)
3068 struct target *target = get_current_target(CMD_CTX);
3069 struct xscale_common *xscale = target_to_xscale(target);
3070 int retval;
3072 retval = xscale_verify_pointer(CMD_CTX, xscale);
3073 if (retval != ERROR_OK)
3074 return retval;
3076 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3079 static int xscale_virt2phys(struct target *target,
3080 uint32_t virtual, uint32_t *physical)
3082 struct xscale_common *xscale = target_to_xscale(target);
3083 int type;
3084 uint32_t cb;
3085 int domain;
3086 uint32_t ap;
3088 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3089 LOG_ERROR(xscale_not);
3090 return ERROR_TARGET_INVALID;
3093 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3094 if (type == -1)
3096 return ret;
3098 *physical = ret;
3099 return ERROR_OK;
3102 static int xscale_mmu(struct target *target, int *enabled)
3104 struct xscale_common *xscale = target_to_xscale(target);
3106 if (target->state != TARGET_HALTED)
3108 LOG_ERROR("Target not halted");
3109 return ERROR_TARGET_INVALID;
3111 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3112 return ERROR_OK;
3115 COMMAND_HANDLER(xscale_handle_mmu_command)
3117 struct target *target = get_current_target(CMD_CTX);
3118 struct xscale_common *xscale = target_to_xscale(target);
3119 int retval;
3121 retval = xscale_verify_pointer(CMD_CTX, xscale);
3122 if (retval != ERROR_OK)
3123 return retval;
3125 if (target->state != TARGET_HALTED)
3127 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3128 return ERROR_OK;
3131 if (CMD_ARGC >= 1)
3133 if (strcmp("enable", CMD_ARGV[0]) == 0)
3135 xscale_enable_mmu_caches(target, 1, 0, 0);
3136 xscale->armv4_5_mmu.mmu_enabled = 1;
3138 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3140 xscale_disable_mmu_caches(target, 1, 0, 0);
3141 xscale->armv4_5_mmu.mmu_enabled = 0;
3145 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3147 return ERROR_OK;
3150 COMMAND_HANDLER(xscale_handle_idcache_command)
3152 struct target *target = get_current_target(CMD_CTX);
3153 struct xscale_common *xscale = target_to_xscale(target);
3154 int icache = 0, dcache = 0;
3155 int retval;
3157 retval = xscale_verify_pointer(CMD_CTX, xscale);
3158 if (retval != ERROR_OK)
3159 return retval;
3161 if (target->state != TARGET_HALTED)
3163 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3164 return ERROR_OK;
3167 if (strcmp(CMD_NAME, "icache") == 0)
3168 icache = 1;
3169 else if (strcmp(CMD_NAME, "dcache") == 0)
3170 dcache = 1;
3172 if (CMD_ARGC >= 1)
3174 if (strcmp("enable", CMD_ARGV[0]) == 0)
3176 xscale_enable_mmu_caches(target, 0, dcache, icache);
3178 if (icache)
3179 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3180 else if (dcache)
3181 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3183 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3185 xscale_disable_mmu_caches(target, 0, dcache, icache);
3187 if (icache)
3188 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3189 else if (dcache)
3190 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3194 if (icache)
3195 command_print(CMD_CTX, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3197 if (dcache)
3198 command_print(CMD_CTX, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3200 return ERROR_OK;
3203 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3205 struct target *target = get_current_target(CMD_CTX);
3206 struct xscale_common *xscale = target_to_xscale(target);
3207 int retval;
3209 retval = xscale_verify_pointer(CMD_CTX, xscale);
3210 if (retval != ERROR_OK)
3211 return retval;
3213 if (CMD_ARGC < 1)
3215 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3217 else
3219 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3220 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3221 xscale_write_dcsr(target, -1, -1);
3224 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3226 return ERROR_OK;
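/* The vector_catch mask is an 8-bit value written to DCSR bits [23:16]; the
 * driver's power-on default is 0x1 (set in xscale_init_arch_info()).  A
 * hypothetical invocation restoring that default would be:
 *
 *   xscale vector_catch 0x01
 */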
3230 COMMAND_HANDLER(xscale_handle_vector_table_command)
3232 struct target *target = get_current_target(CMD_CTX);
3233 struct xscale_common *xscale = target_to_xscale(target);
3234 int err = 0;
3235 int retval;
3237 retval = xscale_verify_pointer(CMD_CTX, xscale);
3238 if (retval != ERROR_OK)
3239 return retval;
3241 if (CMD_ARGC == 0) /* print current settings */
3243 int idx;
3245 command_print(CMD_CTX, "active user-set static vectors:");
3246 for (idx = 1; idx < 8; idx++)
3247 if (xscale->static_low_vectors_set & (1 << idx))
3248 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3249 for (idx = 1; idx < 8; idx++)
3250 if (xscale->static_high_vectors_set & (1 << idx))
3251 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3252 return ERROR_OK;
3255 if (CMD_ARGC != 3)
3256 err = 1;
3257 else
3259 int idx;
3260 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3261 uint32_t vec;
3262 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3264 if (idx < 1 || idx >= 8)
3265 err = 1;
3267 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3269 xscale->static_low_vectors_set |= (1<<idx);
3270 xscale->static_low_vectors[idx] = vec;
3272 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3274 xscale->static_high_vectors_set |= (1<<idx);
3275 xscale->static_high_vectors[idx] = vec;
3277 else
3278 err = 1;
3281 if (err)
3282 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3284 return ERROR_OK;
3288 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3290 struct target *target = get_current_target(CMD_CTX);
3291 struct xscale_common *xscale = target_to_xscale(target);
3292 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3293 uint32_t dcsr_value;
3294 int retval;
3296 retval = xscale_verify_pointer(CMD_CTX, xscale);
3297 if (retval != ERROR_OK)
3298 return retval;
3300 if (target->state != TARGET_HALTED)
3302 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3303 return ERROR_OK;
3306 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3308 struct xscale_trace_data *td, *next_td;
3309 xscale->trace.buffer_enabled = 1;
3311 /* free old trace data */
3312 td = xscale->trace.data;
3313 while (td)
3315 next_td = td->next;
3317 if (td->entries)
3318 free(td->entries);
3319 free(td);
3320 td = next_td;
3322 xscale->trace.data = NULL;
3324 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3326 xscale->trace.buffer_enabled = 0;
3329 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3331 uint32_t fill = 1;
3332 if (CMD_ARGC >= 3)
3333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3334 xscale->trace.buffer_fill = fill;
3336 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3338 xscale->trace.buffer_fill = -1;
3341 if (xscale->trace.buffer_enabled)
3343 /* if we enable the trace buffer in fill-once
3344 * mode we know the address of the first instruction */
3345 xscale->trace.pc_ok = 1;
3346 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3348 else
3350 /* otherwise the address is unknown, and we have no known good PC */
3351 xscale->trace.pc_ok = 0;
3354 command_print(CMD_CTX, "trace buffer %s (%s)",
3355 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3356 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3358 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3359 if (xscale->trace.buffer_fill >= 0)
3360 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3361 else
3362 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3364 return ERROR_OK;
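/* Illustrative ways of driving the trace buffer from the command line:
 *
 *   xscale trace_buffer enable fill 1    (fill-once mode)
 *   xscale trace_buffer enable wrap      (wrap-around mode)
 *   xscale trace_buffer disable
 *
 * Fill mode sets DCSR bit 1 (dcsr_value | 2) while wrap mode clears
 * DCSR[1:0], as done just above via xscale_write_dcsr_sw().
 */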
3367 COMMAND_HANDLER(xscale_handle_trace_image_command)
3369 struct target *target = get_current_target(CMD_CTX);
3370 struct xscale_common *xscale = target_to_xscale(target);
3371 int retval;
3373 if (CMD_ARGC < 1)
3375 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3376 return ERROR_OK;
3379 retval = xscale_verify_pointer(CMD_CTX, xscale);
3380 if (retval != ERROR_OK)
3381 return retval;
3383 if (xscale->trace.image)
3385 image_close(xscale->trace.image);
3386 free(xscale->trace.image);
3387 command_print(CMD_CTX, "previously loaded image found and closed");
3390 xscale->trace.image = malloc(sizeof(struct image));
3391 xscale->trace.image->base_address_set = 0;
3392 xscale->trace.image->start_address_set = 0;
3394 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3395 if (CMD_ARGC >= 2)
3397 xscale->trace.image->base_address_set = 1;
3398 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3400 else
3402 xscale->trace.image->base_address_set = 0;
3405 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3407 free(xscale->trace.image);
3408 xscale->trace.image = NULL;
3409 return ERROR_OK;
3412 return ERROR_OK;
3415 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3417 struct target *target = get_current_target(CMD_CTX);
3418 struct xscale_common *xscale = target_to_xscale(target);
3419 struct xscale_trace_data *trace_data;
3420 struct fileio file;
3421 int retval;
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3427 if (target->state != TARGET_HALTED)
3429 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3430 return ERROR_OK;
3433 if (CMD_ARGC < 1)
3435 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3436 return ERROR_OK;
3439 trace_data = xscale->trace.data;
3441 if (!trace_data)
3443 command_print(CMD_CTX, "no trace data collected");
3444 return ERROR_OK;
3447 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3449 return ERROR_OK;
3452 while (trace_data)
3454 int i;
3456 fileio_write_u32(&file, trace_data->chkpt0);
3457 fileio_write_u32(&file, trace_data->chkpt1);
3458 fileio_write_u32(&file, trace_data->last_instruction);
3459 fileio_write_u32(&file, trace_data->depth);
3461 for (i = 0; i < trace_data->depth; i++)
3462 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3464 trace_data = trace_data->next;
3467 fileio_close(&file);
3469 return ERROR_OK;
3472 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3474 struct target *target = get_current_target(CMD_CTX);
3475 struct xscale_common *xscale = target_to_xscale(target);
3476 int retval;
3478 retval = xscale_verify_pointer(CMD_CTX, xscale);
3479 if (retval != ERROR_OK)
3480 return retval;
3482 xscale_analyze_trace(target, CMD_CTX);
3484 return ERROR_OK;
3487 COMMAND_HANDLER(xscale_handle_cp15)
3489 struct target *target = get_current_target(CMD_CTX);
3490 struct xscale_common *xscale = target_to_xscale(target);
3491 int retval;
3493 retval = xscale_verify_pointer(CMD_CTX, xscale);
3494 if (retval != ERROR_OK)
3495 return retval;
3497 if (target->state != TARGET_HALTED)
3499 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3500 return ERROR_OK;
3502 uint32_t reg_no = 0;
3503 struct reg *reg = NULL;
3504 if (CMD_ARGC > 0)
3506 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3507 /*translate from xscale cp15 register no to openocd register*/
3508 switch (reg_no)
3510 case 0:
3511 reg_no = XSCALE_MAINID;
3512 break;
3513 case 1:
3514 reg_no = XSCALE_CTRL;
3515 break;
3516 case 2:
3517 reg_no = XSCALE_TTB;
3518 break;
3519 case 3:
3520 reg_no = XSCALE_DAC;
3521 break;
3522 case 5:
3523 reg_no = XSCALE_FSR;
3524 break;
3525 case 6:
3526 reg_no = XSCALE_FAR;
3527 break;
3528 case 13:
3529 reg_no = XSCALE_PID;
3530 break;
3531 case 15:
3532 reg_no = XSCALE_CPACCESS;
3533 break;
3534 default:
3535 command_print(CMD_CTX, "invalid register number");
3536 return ERROR_INVALID_ARGUMENTS;
3538 reg = &xscale->reg_cache->reg_list[reg_no];
3541 if (CMD_ARGC == 1)
3543 uint32_t value;
3545 /* read cp15 control register */
3546 xscale_get_reg(reg);
3547 value = buf_get_u32(reg->value, 0, 32);
3548 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3550 else if (CMD_ARGC == 2)
3552 uint32_t value;
3553 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3555 /* send CP write request (command 0x41) */
3556 xscale_send_u32(target, 0x41);
3558 /* send CP register number */
3559 xscale_send_u32(target, reg_no);
3561 /* send CP register value */
3562 xscale_send_u32(target, value);
3564 /* execute cpwait to ensure outstanding operations complete */
3565 xscale_send_u32(target, 0x53);
3567 else
3569 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3572 return ERROR_OK;
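/* The cp15 command accepts the architectural CP15 register number and maps
 * it onto the cached OpenOCD register as in the switch above (0 -> MAINID,
 * 1 -> CTRL, 2 -> TTB, 3 -> DAC, 5 -> FSR, 6 -> FAR, 13 -> PID,
 * 15 -> CPACCESS).  Illustrative invocations:
 *
 *   xscale cp15 1            (read the control register)
 *   xscale cp15 13 0x0       (write 0 to the PID register)
 */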
3575 static int xscale_register_commands(struct command_context *cmd_ctx)
3577 struct command *xscale_cmd;
3579 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3581 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' specify the address used for the debug handler on <target#>");
3582 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3584 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3585 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3586 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3587 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3589 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3590 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3592 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3594 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3595 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3596 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3597 COMMAND_EXEC, "load image from <file> [base address]");
3599 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3601 armv4_5_register_commands(cmd_ctx);
3603 return ERROR_OK;
3606 struct target_type xscale_target =
3608 .name = "xscale",
3610 .poll = xscale_poll,
3611 .arch_state = xscale_arch_state,
3613 .target_request_data = NULL,
3615 .halt = xscale_halt,
3616 .resume = xscale_resume,
3617 .step = xscale_step,
3619 .assert_reset = xscale_assert_reset,
3620 .deassert_reset = xscale_deassert_reset,
3621 .soft_reset_halt = NULL,
3623 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3625 .read_memory = xscale_read_memory,
3626 .write_memory = xscale_write_memory,
3627 .bulk_write_memory = xscale_bulk_write_memory,
3629 .checksum_memory = arm_checksum_memory,
3630 .blank_check_memory = arm_blank_check_memory,
3632 .run_algorithm = armv4_5_run_algorithm,
3634 .add_breakpoint = xscale_add_breakpoint,
3635 .remove_breakpoint = xscale_remove_breakpoint,
3636 .add_watchpoint = xscale_add_watchpoint,
3637 .remove_watchpoint = xscale_remove_watchpoint,
3639 .register_commands = xscale_register_commands,
3640 .target_create = xscale_target_create,
3641 .init_target = xscale_init_target,
3643 .virt2phys = xscale_virt2phys,
3644 .mmu = xscale_mmu