1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
42 * Important XScale documents available as of October 2009 include:
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
57 * Chip-specific microarchitecture documents may also be useful.
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
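/* A minimal sketch of what the generated header is assumed to provide
 * (name and type follow the commented-out declaration above and the uses
 * in xscale_deassert_reset(); the actual byte values come from the
 * assembled debug handler and are not reproduced here):
 *
 *   unsigned const char xscale_debug_handler[] = {
 *           ... handler machine code, byte by byte ...,
 *           0x00    // trailing NUL appended by bin2char
 *   };
 *
 * The trailing NUL is why later code iterates over
 * "sizeof xscale_debug_handler - 1" bytes when filling the mini-ICache.
 */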
85 static char *const xscale_reg_list[] =
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
111 static const struct xscale_reg xscale_reg_arch_info[] =
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 uint8_t buf[4];
142 buf_set_u32(buf, 0, 32, value);
144 return xscale_set_reg(reg, buf);
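/* Usage sketch (mirrors xscale_enable_single_step() further down): pack a
 * plain 32-bit value into the byte-buffer form that xscale_set_reg()
 * expects, e.g.
 *
 *   retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0],
 *                   next_pc | 0x1);
 *
 * (fragment only; "xscale", "next_pc" and "retval" are assumed to be in
 * scope as they are in that caller).
 */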
147 static const char xscale_not[] = "target is not an XScale";
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
156 return ERROR_OK;
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
161 if (tap == NULL)
162 return ERROR_FAIL;
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 struct scan_field field;
167 uint8_t scratch[4];
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 return ERROR_OK;
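/* Typical call pattern used throughout this file: choose the scan end
 * state first, then queue the (possibly skipped) IR scan, e.g.
 *
 *   jtag_set_end_state(TAP_IDLE);
 *   xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
 *
 * The IR scan is only added when the TAP's current instruction differs
 * from the requested one, which keeps the polling loops below from
 * re-scanning the same instruction on every iteration.
 */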
181 static int xscale_read_dcsr(struct target *target)
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199 memset(&fields, 0, sizeof fields);
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
231 /* write the register with the value we just read
232 * on this second pass, only the first bit of field0 is guaranteed to be 0
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
238 jtag_set_end_state(TAP_IDLE);
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
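/* Note on the scan layout used above (a summary of how this file drives
 * the SELDCSR/DBGTX/DBGRX data registers, not a quote from the XScale
 * manuals): every access is built from the same three fields,
 *
 *   fields[0]  3 bits   handshake/control (bit 0 is the ready flag polled
 *                       by xscale_read_tx()/xscale_write_rx(); for SELDCSR,
 *                       bits 1 and 2 carry hold_rst and
 *                       external_debug_break)
 *   fields[1] 32 bits   the data word (DCSR, TX or RX contents)
 *   fields[2]  1 bit    trailer (written as 1 by xscale_write_rx() to
 *                       flag "RX valid")
 */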
248 static void xscale_getbuf(jtag_callback_data_t arg)
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
276 memset(&fields, 0, sizeof fields);
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
303 fields[0].in_value = &field0[i];
305 jtag_add_pathmove(3, path);
307 fields[1].in_value = (uint8_t *)(field1 + i);
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
313 words_scheduled++;
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
325 if (!(field0[i] & 1))
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
334 words_scheduled--;
337 if (words_scheduled == 0)
339 if (attempts++ == 1000)
341 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
347 words_done += words_scheduled;
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
353 free(field1); free(field0); /* also release the status bytes allocated above */
355 return retval;
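/* Usage sketch (see xscale_debug_entry() below): the debug handler pushes
 * 32-bit words through DBGTX and the host drains them in order, e.g.
 *
 *   uint32_t regs[10];
 *   retval = xscale_receive(target, regs, 10);  // r0, pc, r1..r7, cpsr
 *   if (retval != ERROR_OK)
 *           return retval;
 *
 * (fragment only; "target" and "retval" are assumed to be in scope as in
 * that caller).
 */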
358 static int xscale_read_tx(struct target *target, int consume)
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
372 jtag_set_end_state(TAP_IDLE);
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
387 memset(&fields, 0, sizeof fields);
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
405 for (;;)
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
415 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
435 if (!((!(field0_in & 1)) && consume))
437 goto done;
439 if (debug_level >= 3)
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
445 keep_alive();
448 done:
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
453 return ERROR_OK;
456 static int xscale_write_rx(struct target *target)
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
470 jtag_set_end_state(TAP_IDLE);
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
474 memset(&fields, 0, sizeof fields);
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
523 keep_alive();
526 done:
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
538 return ERROR_OK;
541 /* send count elements of size byte to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
549 jtag_set_end_state(TAP_IDLE);
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
553 bits[0] = 3;
554 t[0] = 0;
555 bits[1] = 32;
556 t[2] = 1;
557 bits[2] = 1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
561 switch (size)
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
566 t[1]=le_to_h_u32(buffer);
567 } else
569 t[1]=be_to_h_u32(buffer);
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
575 t[1]=le_to_h_u16(buffer);
576 } else
578 t[1]=be_to_h_u16(buffer);
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
588 jtag_add_dr_out(target->tap,
590 bits,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
602 return ERROR_OK;
605 static int xscale_send_u32(struct target *target, uint32_t value)
607 struct xscale_common *xscale = target_to_xscale(target);
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
637 memset(&fields, 0, sizeof fields);
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
669 return ERROR_OK;
672 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
673 static unsigned int parity (unsigned int v)
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
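/* Illustration of the fold-and-lookup above (0x6996 is a 16-entry parity
 * table indexed by the folded nibble), e.g.
 *
 *   parity(0x00000000) == 0   // zero bits set
 *   parity(0x00000001) == 1   // one bit set
 *   parity(0x00000003) == 0   // two bits set
 *
 * xscale_load_ic() appends this bit after every instruction word it scans
 * into the mini-ICache.
 */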
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
701 buf_set_u32(&cmd, 0, 6, 0x3);
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
706 memset(&fields, 0, sizeof fields);
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
725 for (word = 0; word < 8; word++)
727 buf_set_u32(packet, 0, 32, buffer[word]);
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
736 return jtag_execute_queue();
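/* Usage sketch (see xscale_update_vectors() and xscale_deassert_reset()):
 * one mini-ICache line is 32 bytes, i.e. 8 ARM instruction words, e.g.
 *
 *   uint32_t cache_line[8];
 *   // ... fill cache_line[] with instruction words ...
 *   retval = xscale_load_ic(target, address, cache_line);
 *
 * "address" is the virtual address of the line; its low 5 bits are
 * discarded by the (va >> 5) packing above, so callers are expected to
 * pass line-aligned addresses.
 */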
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
748 /* CMD to invalidate an IC line is b000 (bits [6:4] = b000) */
749 buf_set_u32(&cmd, 0, 6, 0x0);
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
754 memset(&fields, 0, sizeof fields);
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
766 return ERROR_OK;
769 static int xscale_update_vectors(struct target *target)
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
775 uint32_t low_reset_branch, high_reset_branch;
777 for (i = 1; i < 8; i++)
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
784 else
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
797 for (i = 1; i < 8; i++)
799 if (xscale->static_low_vectors_set & (1 << i))
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
803 else
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
830 return ERROR_OK;
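/* Worked example of the branch encoding above (illustration only): an ARM
 * "B" placed at vector address V that should jump to the handler's reset
 * entry at handler_address + 0x20 encodes the signed word offset
 *
 *   offset = (handler_address + 0x20 - V - 8) >> 2;
 *
 * The "- 8" accounts for the ARM pipeline (PC reads as V + 8 when the
 * branch executes), and ARMV4_5_B() keeps the low 24 bits of that offset
 * in the instruction word.
 */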
833 static int xscale_arch_state(struct target *target)
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct arm *armv4_5 = &xscale->armv4_5_common;
838 static const char *state[] =
840 "disabled", "enabled"
843 static const char *arch_dbg_reason[] =
845 "", "\n(processor reset)", "\n(trace buffer full)"
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->cpsr->value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
868 return ERROR_OK;
871 static int xscale_poll(struct target *target)
873 int retval = ERROR_OK;
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
908 return retval;
911 static int xscale_debug_entry(struct target *target)
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct arm *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
951 arm_set_cpsr(armv4_5, buffer[9]);
952 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
954 if (!is_arm_mode(armv4_5->core_mode))
956 target->state = TARGET_UNKNOWN;
957 LOG_ERROR("cpsr contains invalid mode value - communication failure");
958 return ERROR_TARGET_FAILURE;
960 LOG_DEBUG("target entered debug state in %s mode",
961 arm_mode_name(armv4_5->core_mode));
963 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
964 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
966 xscale_receive(target, buffer, 8);
967 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
968 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
969 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
971 else
973 /* r8 to r14, but no spsr */
974 xscale_receive(target, buffer, 7);
977 /* move data from buffer to register cache */
978 for (i = 8; i <= 14; i++)
980 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
981 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
982 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
985 /* examine debug reason */
986 xscale_read_dcsr(target);
987 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
989 /* stored PC (for calculating fixup) */
990 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
992 switch (moe)
994 case 0x0: /* Processor reset */
995 target->debug_reason = DBG_REASON_DBGRQ;
996 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
997 pc -= 4;
998 break;
999 case 0x1: /* Instruction breakpoint hit */
1000 target->debug_reason = DBG_REASON_BREAKPOINT;
1001 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1002 pc -= 4;
1003 break;
1004 case 0x2: /* Data breakpoint hit */
1005 target->debug_reason = DBG_REASON_WATCHPOINT;
1006 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1007 pc -= 4;
1008 break;
1009 case 0x3: /* BKPT instruction executed */
1010 target->debug_reason = DBG_REASON_BREAKPOINT;
1011 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1012 pc -= 4;
1013 break;
1014 case 0x4: /* Ext. debug event */
1015 target->debug_reason = DBG_REASON_DBGRQ;
1016 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1017 pc -= 4;
1018 break;
1019 case 0x5: /* Vector trap occurred */
1020 target->debug_reason = DBG_REASON_BREAKPOINT;
1021 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1022 pc -= 4;
1023 break;
1024 case 0x6: /* Trace buffer full break */
1025 target->debug_reason = DBG_REASON_DBGRQ;
1026 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1027 pc -= 4;
1028 break;
1029 case 0x7: /* Reserved (may flag Hot-Debug support) */
1030 default:
1031 LOG_ERROR("Method of Entry is 'Reserved'");
1032 exit(-1);
1033 break;
1036 /* apply PC fixup */
1037 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1039 /* on the first debug entry, identify cache type */
1040 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1042 uint32_t cache_type_reg;
1044 /* read cp15 cache type register */
1045 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1046 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1048 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1051 /* examine MMU and Cache settings */
1052 /* read cp15 control register */
1053 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1054 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1055 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1056 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1057 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1059 /* tracing enabled, read collected trace data */
1060 if (xscale->trace.buffer_enabled)
1062 xscale_read_trace(target);
1063 xscale->trace.buffer_fill--;
1065 /* resume if we're still collecting trace data */
1066 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1067 && (xscale->trace.buffer_fill > 0))
1069 xscale_resume(target, 1, 0x0, 1, 0);
1071 else
1073 xscale->trace.buffer_enabled = 0;
1077 return ERROR_OK;
1080 static int xscale_halt(struct target *target)
1082 struct xscale_common *xscale = target_to_xscale(target);
1084 LOG_DEBUG("target->state: %s",
1085 target_state_name(target));
1087 if (target->state == TARGET_HALTED)
1089 LOG_DEBUG("target was already halted");
1090 return ERROR_OK;
1092 else if (target->state == TARGET_UNKNOWN)
1094 /* this must not happen for an XScale target */
1095 LOG_ERROR("target was in unknown state when halt was requested");
1096 return ERROR_TARGET_INVALID;
1098 else if (target->state == TARGET_RESET)
1100 LOG_DEBUG("target->state == TARGET_RESET");
1102 else
1104 /* assert external dbg break */
1105 xscale->external_debug_break = 1;
1106 xscale_read_dcsr(target);
1108 target->debug_reason = DBG_REASON_DBGRQ;
1111 return ERROR_OK;
1114 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1116 struct xscale_common *xscale = target_to_xscale(target);
1117 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1118 int retval;
1120 if (xscale->ibcr0_used)
1122 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1124 if (ibcr0_bp)
1126 xscale_unset_breakpoint(target, ibcr0_bp);
1128 else
1130 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1131 exit(-1);
1135 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1136 return retval;
1138 return ERROR_OK;
1141 static int xscale_disable_single_step(struct target *target)
1143 struct xscale_common *xscale = target_to_xscale(target);
1144 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1145 int retval;
1147 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1148 return retval;
1150 return ERROR_OK;
1153 static void xscale_enable_watchpoints(struct target *target)
1155 struct watchpoint *watchpoint = target->watchpoints;
1157 while (watchpoint)
1159 if (watchpoint->set == 0)
1160 xscale_set_watchpoint(target, watchpoint);
1161 watchpoint = watchpoint->next;
1165 static void xscale_enable_breakpoints(struct target *target)
1167 struct breakpoint *breakpoint = target->breakpoints;
1169 /* set any pending breakpoints */
1170 while (breakpoint)
1172 if (breakpoint->set == 0)
1173 xscale_set_breakpoint(target, breakpoint);
1174 breakpoint = breakpoint->next;
1178 static int xscale_resume(struct target *target, int current,
1179 uint32_t address, int handle_breakpoints, int debug_execution)
1181 struct xscale_common *xscale = target_to_xscale(target);
1182 struct arm *armv4_5 = &xscale->armv4_5_common;
1183 struct breakpoint *breakpoint = target->breakpoints;
1184 uint32_t current_pc;
1185 int retval;
1186 int i;
1188 LOG_DEBUG("-");
1190 if (target->state != TARGET_HALTED)
1192 LOG_WARNING("target not halted");
1193 return ERROR_TARGET_NOT_HALTED;
1196 if (!debug_execution)
1198 target_free_all_working_areas(target);
1201 /* update vector tables */
1202 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1203 return retval;
1205 /* current = 1: continue on current pc, otherwise continue at <address> */
1206 if (!current)
1207 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1209 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1211 /* if we're at the reset vector, we have to simulate the branch */
1212 if (current_pc == 0x0)
1214 arm_simulate_step(target, NULL);
1215 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1218 /* the front-end may request us not to handle breakpoints */
1219 if (handle_breakpoints)
1221 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1223 uint32_t next_pc;
1225 /* there's a breakpoint at the current PC, we have to step over it */
1226 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1227 xscale_unset_breakpoint(target, breakpoint);
1229 /* calculate PC of next instruction */
1230 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1232 uint32_t current_opcode;
1233 target_read_u32(target, current_pc, &current_opcode);
1234 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1237 LOG_DEBUG("enable single-step");
1238 xscale_enable_single_step(target, next_pc);
1240 /* restore banked registers */
1241 xscale_restore_context(target);
1243 /* send resume request (command 0x30 or 0x31)
1244 * clean the trace buffer if it is to be enabled (0x62) */
1245 if (xscale->trace.buffer_enabled)
1247 xscale_send_u32(target, 0x62);
1248 xscale_send_u32(target, 0x31);
1250 else
1251 xscale_send_u32(target, 0x30);
1253 /* send CPSR */
1254 xscale_send_u32(target,
1255 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1256 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1257 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1259 for (i = 7; i >= 0; i--)
1261 /* send register */
1262 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1263 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1266 /* send PC */
1267 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1268 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1270 /* wait for and process debug entry */
1271 xscale_debug_entry(target);
1273 LOG_DEBUG("disable single-step");
1274 xscale_disable_single_step(target);
1276 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1277 xscale_set_breakpoint(target, breakpoint);
1281 /* enable any pending breakpoints and watchpoints */
1282 xscale_enable_breakpoints(target);
1283 xscale_enable_watchpoints(target);
1285 /* restore banked registers */
1286 xscale_restore_context(target);
1288 /* send resume request (command 0x30 or 0x31)
1289 * clean the trace buffer if it is to be enabled (0x62) */
1290 if (xscale->trace.buffer_enabled)
1292 xscale_send_u32(target, 0x62);
1293 xscale_send_u32(target, 0x31);
1295 else
1296 xscale_send_u32(target, 0x30);
1298 /* send CPSR */
1299 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1300 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1301 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1303 for (i = 7; i >= 0; i--)
1305 /* send register */
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1307 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1310 /* send PC */
1311 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1312 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1314 target->debug_reason = DBG_REASON_NOTHALTED;
1316 if (!debug_execution)
1318 /* registers are now invalid */
1319 register_cache_invalidate(armv4_5->core_cache);
1320 target->state = TARGET_RUNNING;
1321 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1323 else
1325 target->state = TARGET_DEBUG_RUNNING;
1326 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1329 LOG_DEBUG("target resumed");
1331 return ERROR_OK;
1334 static int xscale_step_inner(struct target *target, int current,
1335 uint32_t address, int handle_breakpoints)
1337 struct xscale_common *xscale = target_to_xscale(target);
1338 struct arm *armv4_5 = &xscale->armv4_5_common;
1339 uint32_t next_pc;
1340 int retval;
1341 int i;
1343 target->debug_reason = DBG_REASON_SINGLESTEP;
1345 /* calculate PC of next instruction */
1346 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1348 uint32_t current_opcode, current_pc;
1349 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1351 target_read_u32(target, current_pc, &current_opcode);
1352 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1353 return retval;
1356 LOG_DEBUG("enable single-step");
1357 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1358 return retval;
1360 /* restore banked registers */
1361 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1362 return retval;
1364 /* send resume request (command 0x30 or 0x31)
1365 * clean the trace buffer if it is to be enabled (0x62) */
1366 if (xscale->trace.buffer_enabled)
1368 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1369 return retval;
1370 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1371 return retval;
1373 else
1374 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1375 return retval;
1377 /* send CPSR */
1378 retval = xscale_send_u32(target,
1379 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1380 if (retval != ERROR_OK)
1381 return retval;
1382 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1383 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1385 for (i = 7; i >= 0; i--)
1387 /* send register */
1388 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1389 return retval;
1390 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1393 /* send PC */
1394 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1395 return retval;
1396 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1398 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1400 /* registers are now invalid */
1401 register_cache_invalidate(armv4_5->core_cache);
1403 /* wait for and process debug entry */
1404 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1405 return retval;
1407 LOG_DEBUG("disable single-step");
1408 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1409 return retval;
1411 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1413 return ERROR_OK;
1416 static int xscale_step(struct target *target, int current,
1417 uint32_t address, int handle_breakpoints)
1419 struct arm *armv4_5 = target_to_armv4_5(target);
1420 struct breakpoint *breakpoint = target->breakpoints;
1422 uint32_t current_pc;
1423 int retval;
1425 if (target->state != TARGET_HALTED)
1427 LOG_WARNING("target not halted");
1428 return ERROR_TARGET_NOT_HALTED;
1431 /* current = 1: continue on current pc, otherwise continue at <address> */
1432 if (!current)
1433 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1435 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1437 /* if we're at the reset vector, we have to simulate the step */
1438 if (current_pc == 0x0)
1440 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1441 return retval;
1442 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1444 target->debug_reason = DBG_REASON_SINGLESTEP;
1445 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1447 return ERROR_OK;
1450 /* the front-end may request us not to handle breakpoints */
1451 if (handle_breakpoints)
1452 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1454 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1455 return retval;
1458 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1460 if (breakpoint)
1462 xscale_set_breakpoint(target, breakpoint);
1465 LOG_DEBUG("target stepped");
1467 return ERROR_OK;
1471 static int xscale_assert_reset(struct target *target)
1473 struct xscale_common *xscale = target_to_xscale(target);
1475 LOG_DEBUG("target->state: %s",
1476 target_state_name(target));
1478 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1479 * end up in T-L-R, which would reset JTAG
1481 jtag_set_end_state(TAP_IDLE);
1482 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1484 /* set Hold reset, Halt mode and Trap Reset */
1485 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1486 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1487 xscale_write_dcsr(target, 1, 0);
1489 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1490 xscale_jtag_set_instr(target->tap, 0x7f);
1491 jtag_execute_queue();
1493 /* assert reset */
1494 jtag_add_reset(0, 1);
1496 /* sleep 1ms, to be sure we fulfill any requirements */
1497 jtag_add_sleep(1000);
1498 jtag_execute_queue();
1500 target->state = TARGET_RESET;
1502 if (target->reset_halt)
1504 int retval;
1505 if ((retval = target_halt(target)) != ERROR_OK)
1506 return retval;
1509 return ERROR_OK;
1512 static int xscale_deassert_reset(struct target *target)
1514 struct xscale_common *xscale = target_to_xscale(target);
1515 struct breakpoint *breakpoint = target->breakpoints;
1517 LOG_DEBUG("-");
1519 xscale->ibcr_available = 2;
1520 xscale->ibcr0_used = 0;
1521 xscale->ibcr1_used = 0;
1523 xscale->dbr_available = 2;
1524 xscale->dbr0_used = 0;
1525 xscale->dbr1_used = 0;
1527 /* mark all hardware breakpoints as unset */
1528 while (breakpoint)
1530 if (breakpoint->type == BKPT_HARD)
1532 breakpoint->set = 0;
1534 breakpoint = breakpoint->next;
1537 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1539 /* FIXME mark hardware watchpoints as unset too. Also,
1540 * at least some of the XScale registers are invalid...
1544 * REVISIT: *assumes* we had an SRST+TRST reset so the mini-icache
1545 * contents got invalidated. Safer to force that, so writing new
1546 * contents can't ever fail.
1549 uint32_t address;
1550 unsigned buf_cnt;
1551 const uint8_t *buffer = xscale_debug_handler;
1552 int retval;
1554 /* release SRST */
1555 jtag_add_reset(0, 0);
1557 /* wait 300ms; 150 and 100ms were not enough */
1558 jtag_add_sleep(300*1000);
1560 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1561 jtag_execute_queue();
1563 /* set Hold reset, Halt mode and Trap Reset */
1564 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1565 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1566 xscale_write_dcsr(target, 1, 0);
1568 /* Load the debug handler into the mini-icache. Since
1569 * it's using halt mode (not monitor mode), it runs in
1570 * "Special Debug State" for access to registers, memory,
1571 * coprocessors, trace data, etc.
1573 address = xscale->handler_address;
1574 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1575 binary_size > 0;
1576 binary_size -= buf_cnt, buffer += buf_cnt)
1578 uint32_t cache_line[8];
1579 unsigned i;
1581 buf_cnt = binary_size;
1582 if (buf_cnt > 32)
1583 buf_cnt = 32;
1585 for (i = 0; i < buf_cnt; i += 4)
1587 /* convert LE buffer to host-endian uint32_t */
1588 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1591 for (; i < 32; i += 4)
1593 cache_line[i / 4] = 0xe1a08008; /* pad the rest of the line with "mov r8, r8" (nop) */
1596 /* only load addresses other than the reset vectors */
1597 if ((address % 0x400) != 0x0)
1599 retval = xscale_load_ic(target, address,
1600 cache_line);
1601 if (retval != ERROR_OK)
1602 return retval;
1605 address += buf_cnt;
1608 retval = xscale_load_ic(target, 0x0,
1609 xscale->low_vectors);
1610 if (retval != ERROR_OK)
1611 return retval;
1612 retval = xscale_load_ic(target, 0xffff0000,
1613 xscale->high_vectors);
1614 if (retval != ERROR_OK)
1615 return retval;
1617 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1619 jtag_add_sleep(100000);
1621 /* set Hold reset, Halt mode and Trap Reset */
1622 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1623 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1624 xscale_write_dcsr(target, 1, 0);
1626 /* clear Hold reset to let the target run (should enter debug handler) */
1627 xscale_write_dcsr(target, 0, 1);
1628 target->state = TARGET_RUNNING;
1630 if (!target->reset_halt)
1632 jtag_add_sleep(10000);
1634 /* we should have entered debug now */
1635 xscale_debug_entry(target);
1636 target->state = TARGET_HALTED;
1638 /* resume the target */
1639 xscale_resume(target, 1, 0x0, 1, 0);
1643 return ERROR_OK;
1646 static int xscale_read_core_reg(struct target *target, struct reg *r,
1647 int num, enum armv4_5_mode mode)
1649 /** \todo add debug handler support for core register reads */
1650 LOG_ERROR("not implemented");
1651 return ERROR_OK;
1654 static int xscale_write_core_reg(struct target *target, struct reg *r,
1655 int num, enum armv4_5_mode mode, uint32_t value)
1657 /** \todo add debug handler support for core register writes */
1658 LOG_ERROR("not implemented");
1659 return ERROR_OK;
1662 static int xscale_full_context(struct target *target)
1664 struct arm *armv4_5 = target_to_armv4_5(target);
1666 uint32_t *buffer;
1668 int i, j;
1670 LOG_DEBUG("-");
1672 if (target->state != TARGET_HALTED)
1674 LOG_WARNING("target not halted");
1675 return ERROR_TARGET_NOT_HALTED;
1678 buffer = malloc(4 * 8);
1680 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1681 * we can't enter User mode on an XScale (unpredictable),
1682 * but User shares registers with SYS
1684 for (i = 1; i < 7; i++)
1686 int valid = 1;
1688 /* check if there are invalid registers in the current mode
1690 for (j = 0; j <= 16; j++)
1692 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1693 valid = 0;
1696 if (!valid)
1698 uint32_t tmp_cpsr;
1700 /* request banked registers */
1701 xscale_send_u32(target, 0x0);
1703 tmp_cpsr = 0x0;
1704 tmp_cpsr |= armv4_5_number_to_mode(i);
1705 tmp_cpsr |= 0xc0; /* I/F bits */
1707 /* send CPSR for desired mode */
1708 xscale_send_u32(target, tmp_cpsr);
1710 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1711 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1713 xscale_receive(target, buffer, 8);
1714 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1715 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1716 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1718 else
1720 xscale_receive(target, buffer, 7);
1723 /* move data from buffer to register cache */
1724 for (j = 8; j <= 14; j++)
1726 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1727 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1728 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1733 free(buffer);
1735 return ERROR_OK;
1738 static int xscale_restore_context(struct target *target)
1740 struct arm *armv4_5 = target_to_armv4_5(target);
1742 int i, j;
1744 if (target->state != TARGET_HALTED)
1746 LOG_WARNING("target not halted");
1747 return ERROR_TARGET_NOT_HALTED;
1750 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1751 * we can't enter User mode on an XScale (unpredictable),
1752 * but User shares registers with SYS
1754 for (i = 1; i < 7; i++)
1756 int dirty = 0;
1758 /* check if there are dirty registers in the current mode
1760 for (j = 8; j <= 14; j++)
1762 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1763 dirty = 1;
1766 /* if not USR/SYS, check if the SPSR needs to be written */
1767 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1769 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1770 dirty = 1;
1773 if (dirty)
1775 uint32_t tmp_cpsr;
1777 /* send banked registers */
1778 xscale_send_u32(target, 0x1);
1780 tmp_cpsr = 0x0;
1781 tmp_cpsr |= armv4_5_number_to_mode(i);
1782 tmp_cpsr |= 0xc0; /* I/F bits */
1784 /* send CPSR for desired mode */
1785 xscale_send_u32(target, tmp_cpsr);
1787 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1788 for (j = 8; j <= 14; j++)
1790 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1791 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1794 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1796 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1797 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1802 return ERROR_OK;
1805 static int xscale_read_memory(struct target *target, uint32_t address,
1806 uint32_t size, uint32_t count, uint8_t *buffer)
1808 struct xscale_common *xscale = target_to_xscale(target);
1809 uint32_t *buf32;
1810 uint32_t i;
1811 int retval;
1813 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1815 if (target->state != TARGET_HALTED)
1817 LOG_WARNING("target not halted");
1818 return ERROR_TARGET_NOT_HALTED;
1821 /* sanitize arguments */
1822 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1823 return ERROR_INVALID_ARGUMENTS;
1825 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1826 return ERROR_TARGET_UNALIGNED_ACCESS;
1828 /* send memory read request (command 0x1n, n: access size) */
1829 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1830 return retval;
1832 /* send base address for read request */
1833 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1834 return retval;
1836 /* send number of requested data words */
1837 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1838 return retval;
1840 /* receive data from target (count times 32-bit words in host endianness) */
1841 buf32 = malloc(4 * count);
1842 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1843 return retval;
1845 /* extract data from host-endian buffer into byte stream */
1846 for (i = 0; i < count; i++)
1848 switch (size)
1850 case 4:
1851 target_buffer_set_u32(target, buffer, buf32[i]);
1852 buffer += 4;
1853 break;
1854 case 2:
1855 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1856 buffer += 2;
1857 break;
1858 case 1:
1859 *buffer++ = buf32[i] & 0xff;
1860 break;
1861 default:
1862 LOG_ERROR("invalid read size");
1863 return ERROR_INVALID_ARGUMENTS;
1867 free(buf32);
1869 /* examine DCSR, to see if Sticky Abort (SA) got set */
1870 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1871 return retval;
1872 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1874 /* clear SA bit */
1875 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1876 return retval;
1878 return ERROR_TARGET_DATA_ABORT;
1881 return ERROR_OK;
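/* Usage sketch (illustration only): reading a single 32-bit word through
 * the debug handler with the 0x1n request implemented above:
 *
 *   uint8_t buf[4];
 *   retval = xscale_read_memory(target, address, 4, 1, buf);
 *   if (retval == ERROR_OK)
 *           value = target_buffer_get_u32(target, buf);
 *
 * (fragment only; "target", "address", "value" and "retval" are assumed
 * to be declared by the caller).
 */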
1884 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1885 uint32_t size, uint32_t count, uint8_t *buffer)
1887 /** \todo: provide a non-stub implementation of this routine. */
1888 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1889 target_name(target), __func__);
1890 return ERROR_FAIL;
1893 static int xscale_write_memory(struct target *target, uint32_t address,
1894 uint32_t size, uint32_t count, uint8_t *buffer)
1896 struct xscale_common *xscale = target_to_xscale(target);
1897 int retval;
1899 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1901 if (target->state != TARGET_HALTED)
1903 LOG_WARNING("target not halted");
1904 return ERROR_TARGET_NOT_HALTED;
1907 /* sanitize arguments */
1908 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1909 return ERROR_INVALID_ARGUMENTS;
1911 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1912 return ERROR_TARGET_UNALIGNED_ACCESS;
1914 /* send memory write request (command 0x2n, n: access size) */
1915 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1916 return retval;
1918 /* send base address for write request */
1919 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1920 return retval;
1922 /* send number of requested data words to be written */
1923 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1924 return retval;
1926 /* extract data from host-endian buffer into byte stream */
1927 #if 0
1928 for (i = 0; i < count; i++)
1930 switch (size)
1932 case 4:
1933 value = target_buffer_get_u32(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 4;
1936 break;
1937 case 2:
1938 value = target_buffer_get_u16(target, buffer);
1939 xscale_send_u32(target, value);
1940 buffer += 2;
1941 break;
1942 case 1:
1943 value = *buffer;
1944 xscale_send_u32(target, value);
1945 buffer += 1;
1946 break;
1947 default:
1948 LOG_ERROR("should never get here");
1949 exit(-1);
1952 #endif
1953 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1954 return retval;
1956 /* examine DCSR, to see if Sticky Abort (SA) got set */
1957 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1958 return retval;
1959 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1961 /* clear SA bit */
1962 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1963 return retval;
1965 return ERROR_TARGET_DATA_ABORT;
1968 return ERROR_OK;
1971 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1972 uint32_t size, uint32_t count, uint8_t *buffer)
1974 /** \todo: provide a non-stub implementation of this routine. */
1975 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1976 target_name(target), __func__);
1977 return ERROR_FAIL;
1980 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1981 uint32_t count, uint8_t *buffer)
1983 return xscale_write_memory(target, address, 4, count, buffer);
1986 static uint32_t xscale_get_ttb(struct target *target)
1988 struct xscale_common *xscale = target_to_xscale(target);
1989 uint32_t ttb;
1991 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1992 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1994 return ttb;
1997 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1998 int d_u_cache, int i_cache)
2000 struct xscale_common *xscale = target_to_xscale(target);
2001 uint32_t cp15_control;
2003 /* read cp15 control register */
2004 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2005 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2007 if (mmu)
2008 cp15_control &= ~0x1U;
2010 if (d_u_cache)
2012 /* clean DCache */
2013 xscale_send_u32(target, 0x50);
2014 xscale_send_u32(target, xscale->cache_clean_address);
2016 /* invalidate DCache */
2017 xscale_send_u32(target, 0x51);
2019 cp15_control &= ~0x4U;
2022 if (i_cache)
2024 /* invalidate ICache */
2025 xscale_send_u32(target, 0x52);
2026 cp15_control &= ~0x1000U;
2029 /* write new cp15 control register */
2030 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2032 /* execute cpwait to ensure outstanding operations complete */
2033 xscale_send_u32(target, 0x53);
2036 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2037 int d_u_cache, int i_cache)
2039 struct xscale_common *xscale = target_to_xscale(target);
2040 uint32_t cp15_control;
2042 /* read cp15 control register */
2043 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2044 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2046 if (mmu)
2047 cp15_control |= 0x1U;
2049 if (d_u_cache)
2050 cp15_control |= 0x4U;
2052 if (i_cache)
2053 cp15_control |= 0x1000U;
2055 /* write new cp15 control register */
2056 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2058 /* execute cpwait to ensure outstanding operations complete */
2059 xscale_send_u32(target, 0x53);
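/* Breakpoint support: the XScale debug unit provides two instruction
 * breakpoint registers (IBCR0/IBCR1), tracked below via ibcr0_used,
 * ibcr1_used and the ibcr_available counter. Hardware breakpoints program
 * the address with bit 0 set as the enable bit; software breakpoints save
 * the original opcode and overwrite it with the ARM or Thumb BKPT
 * instruction (xscale->arm_bkpt / xscale->thumb_bkpt).
 */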
2062 static int xscale_set_breakpoint(struct target *target,
2063 struct breakpoint *breakpoint)
2065 int retval;
2066 struct xscale_common *xscale = target_to_xscale(target);
2068 if (target->state != TARGET_HALTED)
2070 LOG_WARNING("target not halted");
2071 return ERROR_TARGET_NOT_HALTED;
2074 if (breakpoint->set)
2076 LOG_WARNING("breakpoint already set");
2077 return ERROR_OK;
2080 if (breakpoint->type == BKPT_HARD)
2082 uint32_t value = breakpoint->address | 1;
2083 if (!xscale->ibcr0_used)
2085 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2086 xscale->ibcr0_used = 1;
2087 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2089 else if (!xscale->ibcr1_used)
2091 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2092 xscale->ibcr1_used = 1;
2093 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2095 else
2097 LOG_ERROR("BUG: no hardware comparator available");
2098 return ERROR_OK;
2101 else if (breakpoint->type == BKPT_SOFT)
2103 if (breakpoint->length == 4)
2105 /* keep the original instruction in target endianness */
2106 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2108 return retval;
2110 /* write the breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
2111 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2113 return retval;
2116 else
2118 /* keep the original instruction in target endianness */
2119 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2121 return retval;
2123 /* write the breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
2124 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2126 return retval;
2129 breakpoint->set = 1;
2132 return ERROR_OK;
2135 static int xscale_add_breakpoint(struct target *target,
2136 struct breakpoint *breakpoint)
2138 struct xscale_common *xscale = target_to_xscale(target);
2140 if (target->state != TARGET_HALTED)
2142 LOG_WARNING("target not halted");
2143 return ERROR_TARGET_NOT_HALTED;
2146 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2148 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2149 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2152 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2154 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes in length are supported");
2155 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2158 if (breakpoint->type == BKPT_HARD)
2160 xscale->ibcr_available--;
2163 return ERROR_OK;
2166 static int xscale_unset_breakpoint(struct target *target,
2167 struct breakpoint *breakpoint)
2169 int retval;
2170 struct xscale_common *xscale = target_to_xscale(target);
2172 if (target->state != TARGET_HALTED)
2174 LOG_WARNING("target not halted");
2175 return ERROR_TARGET_NOT_HALTED;
2178 if (!breakpoint->set)
2180 LOG_WARNING("breakpoint not set");
2181 return ERROR_OK;
2184 if (breakpoint->type == BKPT_HARD)
2186 if (breakpoint->set == 1)
2188 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2189 xscale->ibcr0_used = 0;
2191 else if (breakpoint->set == 2)
2193 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2194 xscale->ibcr1_used = 0;
2196 breakpoint->set = 0;
2198 else
2200 /* restore original instruction (kept in target endianness) */
2201 if (breakpoint->length == 4)
2203 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2205 return retval;
2208 else
2210 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2212 return retval;
2215 breakpoint->set = 0;
2218 return ERROR_OK;
2221 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2223 struct xscale_common *xscale = target_to_xscale(target);
2225 if (target->state != TARGET_HALTED)
2227 LOG_WARNING("target not halted");
2228 return ERROR_TARGET_NOT_HALTED;
2231 if (breakpoint->set)
2233 xscale_unset_breakpoint(target, breakpoint);
2236 if (breakpoint->type == BKPT_HARD)
2237 xscale->ibcr_available++;
2239 return ERROR_OK;
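/* Watchpoint support: two data breakpoint registers (DBR0/DBR1) hold the
 * watched addresses, while DBCON holds a two-bit enable field per
 * comparator (0x1 = break on store, 0x2 = break on any access,
 * 0x3 = break on load, selected from watchpoint->rw below); the enable
 * field for DBR1 sits two bits higher in DBCON.
 */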
2242 static int xscale_set_watchpoint(struct target *target,
2243 struct watchpoint *watchpoint)
2245 struct xscale_common *xscale = target_to_xscale(target);
2246 uint8_t enable = 0;
2247 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2248 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2250 if (target->state != TARGET_HALTED)
2252 LOG_WARNING("target not halted");
2253 return ERROR_TARGET_NOT_HALTED;
2256 xscale_get_reg(dbcon);
2258 switch (watchpoint->rw)
2260 case WPT_READ:
2261 enable = 0x3;
2262 break;
2263 case WPT_ACCESS:
2264 enable = 0x2;
2265 break;
2266 case WPT_WRITE:
2267 enable = 0x1;
2268 break;
2269 default:
2270 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2273 if (!xscale->dbr0_used)
2275 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2276 dbcon_value |= enable;
2277 xscale_set_reg_u32(dbcon, dbcon_value);
2278 watchpoint->set = 1;
2279 xscale->dbr0_used = 1;
2281 else if (!xscale->dbr1_used)
2283 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2284 dbcon_value |= enable << 2;
2285 xscale_set_reg_u32(dbcon, dbcon_value);
2286 watchpoint->set = 2;
2287 xscale->dbr1_used = 1;
2289 else
2291 LOG_ERROR("BUG: no hardware comparator available");
2292 return ERROR_OK;
2295 return ERROR_OK;
2298 static int xscale_add_watchpoint(struct target *target,
2299 struct watchpoint *watchpoint)
2301 struct xscale_common *xscale = target_to_xscale(target);
2303 if (target->state != TARGET_HALTED)
2305 LOG_WARNING("target not halted");
2306 return ERROR_TARGET_NOT_HALTED;
2309 if (xscale->dbr_available < 1)
2311 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2314 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2316 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2319 xscale->dbr_available--;
2321 return ERROR_OK;
2324 static int xscale_unset_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2327 struct xscale_common *xscale = target_to_xscale(target);
2328 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2329 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2331 if (target->state != TARGET_HALTED)
2333 LOG_WARNING("target not halted");
2334 return ERROR_TARGET_NOT_HALTED;
2337 if (!watchpoint->set)
2339 LOG_WARNING("watchpoint not set");
2340 return ERROR_OK;
2343 if (watchpoint->set == 1)
2345 dbcon_value &= ~0x3;
2346 xscale_set_reg_u32(dbcon, dbcon_value);
2347 xscale->dbr0_used = 0;
2349 else if (watchpoint->set == 2)
2351 dbcon_value &= ~0xc;
2352 xscale_set_reg_u32(dbcon, dbcon_value);
2353 xscale->dbr1_used = 0;
2355 watchpoint->set = 0;
2357 return ERROR_OK;
2360 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2362 struct xscale_common *xscale = target_to_xscale(target);
2364 if (target->state != TARGET_HALTED)
2366 LOG_WARNING("target not halted");
2367 return ERROR_TARGET_NOT_HALTED;
2370 if (watchpoint->set)
2372 xscale_unset_watchpoint(target, watchpoint);
2375 xscale->dbr_available++;
2377 return ERROR_OK;
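/* Register access: DCSR, TX and RX live in JTAG data registers and are
 * accessed directly (xscale_read_dcsr / xscale_read_tx / xscale_write_rx).
 * All other debug registers are proxied through the debug handler:
 * command 0x40 plus a register number reads a value back through TX,
 * command 0x41 plus a register number and a value performs a write.
 */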
2380 static int xscale_get_reg(struct reg *reg)
2382 struct xscale_reg *arch_info = reg->arch_info;
2383 struct target *target = arch_info->target;
2384 struct xscale_common *xscale = target_to_xscale(target);
2386 /* DCSR, TX and RX are accessible via JTAG */
2387 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2389 return xscale_read_dcsr(arch_info->target);
2391 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2393 /* 1 = consume register content */
2394 return xscale_read_tx(arch_info->target, 1);
2396 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2398 /* can't read from RX register (host -> debug handler) */
2399 return ERROR_OK;
2401 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2403 /* can't (explicitly) read from TXRXCTRL register */
2404 return ERROR_OK;
2406 else /* Other DBG registers have to be transferred by the debug handler */
2408 /* send CP read request (command 0x40) */
2409 xscale_send_u32(target, 0x40);
2411 /* send CP register number */
2412 xscale_send_u32(target, arch_info->dbg_handler_number);
2414 /* read register value */
2415 xscale_read_tx(target, 1);
2416 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2418 reg->dirty = 0;
2419 reg->valid = 1;
2422 return ERROR_OK;
2425 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2427 struct xscale_reg *arch_info = reg->arch_info;
2428 struct target *target = arch_info->target;
2429 struct xscale_common *xscale = target_to_xscale(target);
2430 uint32_t value = buf_get_u32(buf, 0, 32);
2432 /* DCSR, TX and RX are accessible via JTAG */
2433 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2435 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2436 return xscale_write_dcsr(arch_info->target, -1, -1);
2438 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2440 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2441 return xscale_write_rx(arch_info->target);
2443 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2445 /* can't write to TX register (debug-handler -> host) */
2446 return ERROR_OK;
2448 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2450 /* can't (explicitly) write to TXRXCTRL register */
2451 return ERROR_OK;
2453 else /* Other DBG registers have to be transferred by the debug handler */
2455 /* send CP write request (command 0x41) */
2456 xscale_send_u32(target, 0x41);
2458 /* send CP register number */
2459 xscale_send_u32(target, arch_info->dbg_handler_number);
2461 /* send CP register value */
2462 xscale_send_u32(target, value);
2463 buf_set_u32(reg->value, 0, 32, value);
2466 return ERROR_OK;
2469 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2471 struct xscale_common *xscale = target_to_xscale(target);
2472 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2473 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2475 /* send CP write request (command 0x41) */
2476 xscale_send_u32(target, 0x41);
2478 /* send CP register number */
2479 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2481 /* send CP register value */
2482 xscale_send_u32(target, value);
2483 buf_set_u32(dcsr->value, 0, 32, value);
2485 return ERROR_OK;
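/* Trace buffer readout: handler command 0x61 returns 258 words -- the 256
 * trace buffer entries followed by the two checkpoint registers CHKPT0
 * and CHKPT1. The buffer is scanned backwards so that the four data bytes
 * preceding an indirect-branch message (0x9n / 0xDn) can be marked as
 * address entries rather than trace messages.
 */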
2488 static int xscale_read_trace(struct target *target)
2490 struct xscale_common *xscale = target_to_xscale(target);
2491 struct arm *armv4_5 = &xscale->armv4_5_common;
2492 struct xscale_trace_data **trace_data_p;
2494 /* 258 words from debug handler
2495 * 256 trace buffer entries
2496 * 2 checkpoint addresses
2498 uint32_t trace_buffer[258];
2499 int is_address[256];
2500 int i, j;
2502 if (target->state != TARGET_HALTED)
2504 LOG_WARNING("target must be stopped to read trace data");
2505 return ERROR_TARGET_NOT_HALTED;
2508 /* send read trace buffer command (command 0x61) */
2509 xscale_send_u32(target, 0x61);
2511 /* receive trace buffer content */
2512 xscale_receive(target, trace_buffer, 258);
2514 /* parse buffer backwards to identify address entries */
2515 for (i = 255; i >= 0; i--)
2517 is_address[i] = 0;
2518 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2519 ((trace_buffer[i] & 0xf0) == 0xd0))
2521 if (i >= 3)
2522 is_address[--i] = 1;
2523 if (i >= 2)
2524 is_address[--i] = 1;
2525 if (i >= 1)
2526 is_address[--i] = 1;
2527 if (i >= 0)
2528 is_address[--i] = 1;
2533 /* search first non-zero entry */
2534 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2537 if (j == 256)
2539 LOG_DEBUG("no trace data collected");
2540 return ERROR_XSCALE_NO_TRACE_DATA;
2543 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2546 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2547 (*trace_data_p)->next = NULL;
2548 (*trace_data_p)->chkpt0 = trace_buffer[256];
2549 (*trace_data_p)->chkpt1 = trace_buffer[257];
2550 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2551 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2552 (*trace_data_p)->depth = 256 - j;
2554 for (i = j; i < 256; i++)
2556 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2557 if (is_address[i])
2558 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2559 else
2560 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2563 return ERROR_OK;
2566 static int xscale_read_instruction(struct target *target,
2567 struct arm_instruction *instruction)
2569 struct xscale_common *xscale = target_to_xscale(target);
2570 int i;
2571 int section = -1;
2572 size_t size_read;
2573 uint32_t opcode;
2574 int retval;
2576 if (!xscale->trace.image)
2577 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2579 /* search for the section the current instruction belongs to */
2580 for (i = 0; i < xscale->trace.image->num_sections; i++)
2582 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2583 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2585 section = i;
2586 break;
2590 if (section == -1)
2592 /* current instruction couldn't be found in the image */
2593 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2596 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2598 uint8_t buf[4];
2599 if ((retval = image_read_section(xscale->trace.image, section,
2600 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2601 4, buf, &size_read)) != ERROR_OK)
2603 LOG_ERROR("error while reading instruction: %i", retval);
2604 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2606 opcode = target_buffer_get_u32(target, buf);
2607 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2609 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2611 uint8_t buf[2];
2612 if ((retval = image_read_section(xscale->trace.image, section,
2613 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2614 2, buf, &size_read)) != ERROR_OK)
2616 LOG_ERROR("error while reading instruction: %i", retval);
2617 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2619 opcode = target_buffer_get_u16(target, buf);
2620 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2622 else
2624 LOG_ERROR("BUG: unknown core state encountered");
2625 exit(-1);
2628 return ERROR_OK;
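/* The four buffer entries immediately preceding an indirect-branch
 * message hold the 32-bit branch target, with the entry closest to the
 * message carrying the least significant byte. Purely illustrative
 * example: if entries[i-4..i-1] are 0xA0, 0x00, 0x12, 0x34, the
 * reconstructed target is
 *   0x34 | (0x12 << 8) | (0x00 << 16) | (0xA0 << 24) = 0xA0001234.
 */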
2631 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2632 int i, uint32_t *target)
2634 /* if there are less than four entries prior to the indirect branch message
2635 * we can't extract the address */
2636 if (i < 4)
2638 return -1;
2641 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2642 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2644 return 0;
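/* Trace analysis walks each captured buffer entry: the upper nibble of an
 * entry selects the message type (0-7 exceptions, 8 direct branch,
 * 9 indirect branch, 12/13 checkpointed direct/indirect branch, 15
 * roll-over), while the lower nibble counts instructions executed since
 * the previous message (accumulated across roll-over messages). Once a
 * known PC is established, the executed instructions are re-read from the
 * loaded trace image and printed one by one.
 */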
2647 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2649 struct xscale_common *xscale = target_to_xscale(target);
2650 int next_pc_ok = 0;
2651 uint32_t next_pc = 0x0;
2652 struct xscale_trace_data *trace_data = xscale->trace.data;
2653 int retval;
2655 while (trace_data)
2657 int i, chkpt;
2658 int rollover;
2659 int branch;
2660 int exception;
2661 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2663 chkpt = 0;
2664 rollover = 0;
2666 for (i = 0; i < trace_data->depth; i++)
2668 next_pc_ok = 0;
2669 branch = 0;
2670 exception = 0;
2672 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2673 continue;
2675 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2677 case 0: /* Exceptions */
2678 case 1:
2679 case 2:
2680 case 3:
2681 case 4:
2682 case 5:
2683 case 6:
2684 case 7:
2685 exception = (trace_data->entries[i].data & 0x70) >> 4;
2686 next_pc_ok = 1;
2687 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2688 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2689 break;
2690 case 8: /* Direct Branch */
2691 branch = 1;
2692 break;
2693 case 9: /* Indirect Branch */
2694 branch = 1;
2695 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2697 next_pc_ok = 1;
2699 break;
2700 case 13: /* Checkpointed Indirect Branch */
2701 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2703 next_pc_ok = 1;
2704 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2705 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2706 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2708 /* explicit fall-through */
2709 case 12: /* Checkpointed Direct Branch */
2710 branch = 1;
2711 if (chkpt == 0)
2713 next_pc_ok = 1;
2714 next_pc = trace_data->chkpt0;
2715 chkpt++;
2717 else if (chkpt == 1)
2719 next_pc_ok = 1;
2720 next_pc = trace_data->chkpt1;
2721 chkpt++;
2723 else
2725 LOG_WARNING("more than two checkpointed branches encountered");
2727 break;
2728 case 15: /* Roll-over */
2729 rollover++;
2730 continue;
2731 default: /* Reserved */
2732 command_print(cmd_ctx, "--- reserved trace message ---");
2733 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2734 return ERROR_OK;
2737 if (xscale->trace.pc_ok)
2739 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2740 struct arm_instruction instruction;
2742 if ((exception == 6) || (exception == 7))
2744 /* IRQ or FIQ exception, no instruction executed */
2745 executed -= 1;
2748 while (executed-- >= 0)
2750 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2752 /* can't continue tracing with no image available */
2753 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2755 return retval;
2757 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2759 /* TODO: handle incomplete images */
2763 /* a precise abort on a load to the PC is included in the incremental
2764 * word count, other instructions causing data aborts are not included
2766 if ((executed == 0) && (exception == 4)
2767 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2769 if ((instruction.type == ARM_LDM)
2770 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2772 executed--;
2774 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2775 && (instruction.info.load_store.Rd != 15))
2777 executed--;
2781 /* only the last instruction executed
2782 * (the one that caused the control flow change)
2783 * could be a taken branch
2785 if (((executed == -1) && (branch == 1)) &&
2786 (((instruction.type == ARM_B) ||
2787 (instruction.type == ARM_BL) ||
2788 (instruction.type == ARM_BLX)) &&
2789 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2791 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2793 else
2795 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2797 command_print(cmd_ctx, "%s", instruction.text);
2800 rollover = 0;
2803 if (next_pc_ok)
2805 xscale->trace.current_pc = next_pc;
2806 xscale->trace.pc_ok = 1;
2810 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2812 struct arm_instruction instruction;
2813 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2815 /* can't continue tracing with no image available */
2816 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2818 return retval;
2820 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2822 /* TODO: handle incomplete images */
2825 command_print(cmd_ctx, "%s", instruction.text);
2828 trace_data = trace_data->next;
2831 return ERROR_OK;
2834 static const struct reg_arch_type xscale_reg_type = {
2835 .get = xscale_get_reg,
2836 .set = xscale_set_reg,
2839 static void xscale_build_reg_cache(struct target *target)
2841 struct xscale_common *xscale = target_to_xscale(target);
2842 struct arm *armv4_5 = &xscale->armv4_5_common;
2843 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2844 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2845 int i;
2846 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2848 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2850 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2851 cache_p = &(*cache_p)->next;
2853 /* fill in values for the xscale reg cache */
2854 (*cache_p)->name = "XScale registers";
2855 (*cache_p)->next = NULL;
2856 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2857 (*cache_p)->num_regs = num_regs;
2859 for (i = 0; i < num_regs; i++)
2861 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2862 (*cache_p)->reg_list[i].value = calloc(4, 1);
2863 (*cache_p)->reg_list[i].dirty = 0;
2864 (*cache_p)->reg_list[i].valid = 0;
2865 (*cache_p)->reg_list[i].size = 32;
2866 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2867 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2868 arch_info[i] = xscale_reg_arch_info[i];
2869 arch_info[i].target = target;
2872 xscale->reg_cache = (*cache_p);
2875 static int xscale_init_target(struct command_context *cmd_ctx,
2876 struct target *target)
2878 xscale_build_reg_cache(target);
2879 return ERROR_OK;
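/* One-time per-target setup: the debug handler is assumed to live at
 * 0xfe000800 until "xscale debug_handler" says otherwise, the reset
 * vector of both the low (0x0) and high (0xffff0000) vector tables is
 * pointed at handler_address + 0x20, and the remaining seven vectors
 * default to branch-to-self instructions unless overridden with
 * "xscale vector_table". Two instruction and two data breakpoint
 * comparators are available, and 0xfffe0000 is the default 64kB-aligned
 * DCache clean area.
 */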
2882 static int xscale_init_arch_info(struct target *target,
2883 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2885 struct arm *armv4_5;
2886 uint32_t high_reset_branch, low_reset_branch;
2887 int i;
2889 armv4_5 = &xscale->armv4_5_common;
2891 /* store architecture specific data (none so far) */
2892 xscale->common_magic = XSCALE_COMMON_MAGIC;
2894 /* we don't really *need* variant info ... */
2895 if (variant) {
2896 int ir_length = 0;
2898 if (strcmp(variant, "pxa250") == 0
2899 || strcmp(variant, "pxa255") == 0
2900 || strcmp(variant, "pxa26x") == 0)
2901 ir_length = 5;
2902 else if (strcmp(variant, "pxa27x") == 0
2903 || strcmp(variant, "ixp42x") == 0
2904 || strcmp(variant, "ixp45x") == 0
2905 || strcmp(variant, "ixp46x") == 0)
2906 ir_length = 7;
2907 else
2908 LOG_WARNING("%s: unrecognized variant %s",
2909 tap->dotted_name, variant);
2911 if (ir_length && ir_length != tap->ir_length) {
2912 LOG_WARNING("%s: IR length for %s is %d; fixing",
2913 tap->dotted_name, variant, ir_length);
2914 tap->ir_length = ir_length;
2918 /* the debug handler isn't installed (and thus not running) at this time */
2919 xscale->handler_address = 0xfe000800;
2921 /* clear the vectors we keep locally for reference */
2922 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2923 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2925 /* no user-specified vectors have been configured yet */
2926 xscale->static_low_vectors_set = 0x0;
2927 xscale->static_high_vectors_set = 0x0;
2929 /* calculate branches to debug handler */
2930 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2931 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2933 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2934 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2936 for (i = 1; i <= 7; i++)
2938 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2939 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2942 /* 64kB aligned region used for DCache cleaning */
2943 xscale->cache_clean_address = 0xfffe0000;
2945 xscale->hold_rst = 0;
2946 xscale->external_debug_break = 0;
2948 xscale->ibcr_available = 2;
2949 xscale->ibcr0_used = 0;
2950 xscale->ibcr1_used = 0;
2952 xscale->dbr_available = 2;
2953 xscale->dbr0_used = 0;
2954 xscale->dbr1_used = 0;
2956 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2957 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2959 xscale->vector_catch = 0x1;
2961 xscale->trace.capture_status = TRACE_IDLE;
2962 xscale->trace.data = NULL;
2963 xscale->trace.image = NULL;
2964 xscale->trace.buffer_enabled = 0;
2965 xscale->trace.buffer_fill = 0;
2967 /* prepare ARMv4/5 specific information */
2968 armv4_5->arch_info = xscale;
2969 armv4_5->read_core_reg = xscale_read_core_reg;
2970 armv4_5->write_core_reg = xscale_write_core_reg;
2971 armv4_5->full_context = xscale_full_context;
2973 armv4_5_init_arch_info(target, armv4_5);
2975 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2976 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2977 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2978 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2979 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2980 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2981 xscale->armv4_5_mmu.has_tiny_pages = 1;
2982 xscale->armv4_5_mmu.mmu_enabled = 0;
2984 return ERROR_OK;
2987 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2989 struct xscale_common *xscale;
2991 if (sizeof xscale_debug_handler - 1 > 0x800) {
2992 LOG_ERROR("debug_handler.bin: larger than 2kb");
2993 return ERROR_FAIL;
2996 xscale = calloc(1, sizeof(*xscale));
2997 if (!xscale)
2998 return ERROR_FAIL;
3000 return xscale_init_arch_info(target, xscale, target->tap,
3001 target->variant);
3004 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3006 struct target *target = NULL;
3007 struct xscale_common *xscale;
3008 int retval;
3009 uint32_t handler_address;
3011 if (CMD_ARGC < 2)
3013 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3014 return ERROR_OK;
3017 if ((target = get_target(CMD_ARGV[0])) == NULL)
3019 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3020 return ERROR_FAIL;
3023 xscale = target_to_xscale(target);
3024 retval = xscale_verify_pointer(CMD_CTX, xscale);
3025 if (retval != ERROR_OK)
3026 return retval;
3028 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3030 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3031 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3033 xscale->handler_address = handler_address;
3035 else
3037 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3038 return ERROR_FAIL;
3041 return ERROR_OK;
3044 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3046 struct target *target = NULL;
3047 struct xscale_common *xscale;
3048 int retval;
3049 uint32_t cache_clean_address;
3051 if (CMD_ARGC < 2)
3053 return ERROR_COMMAND_SYNTAX_ERROR;
3056 target = get_target(CMD_ARGV[0]);
3057 if (target == NULL)
3059 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3060 return ERROR_FAIL;
3062 xscale = target_to_xscale(target);
3063 retval = xscale_verify_pointer(CMD_CTX, xscale);
3064 if (retval != ERROR_OK)
3065 return retval;
3067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3069 if (cache_clean_address & 0xffff)
3071 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3073 else
3075 xscale->cache_clean_address = cache_clean_address;
3078 return ERROR_OK;
3081 COMMAND_HANDLER(xscale_handle_cache_info_command)
3083 struct target *target = get_current_target(CMD_CTX);
3084 struct xscale_common *xscale = target_to_xscale(target);
3085 int retval;
3087 retval = xscale_verify_pointer(CMD_CTX, xscale);
3088 if (retval != ERROR_OK)
3089 return retval;
3091 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3094 static int xscale_virt2phys(struct target *target,
3095 uint32_t virtual, uint32_t *physical)
3097 struct xscale_common *xscale = target_to_xscale(target);
3098 int type;
3099 uint32_t cb;
3100 int domain;
3101 uint32_t ap;
3103 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3104 LOG_ERROR(xscale_not);
3105 return ERROR_TARGET_INVALID;
3108 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3109 if (type == -1)
3111 return ret;
3113 *physical = ret;
3114 return ERROR_OK;
3117 static int xscale_mmu(struct target *target, int *enabled)
3119 struct xscale_common *xscale = target_to_xscale(target);
3121 if (target->state != TARGET_HALTED)
3123 LOG_ERROR("Target not halted");
3124 return ERROR_TARGET_INVALID;
3126 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3127 return ERROR_OK;
3130 COMMAND_HANDLER(xscale_handle_mmu_command)
3132 struct target *target = get_current_target(CMD_CTX);
3133 struct xscale_common *xscale = target_to_xscale(target);
3134 int retval;
3136 retval = xscale_verify_pointer(CMD_CTX, xscale);
3137 if (retval != ERROR_OK)
3138 return retval;
3140 if (target->state != TARGET_HALTED)
3142 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3143 return ERROR_OK;
3146 if (CMD_ARGC >= 1)
3148 bool enable;
3149 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3150 if (enable)
3151 xscale_enable_mmu_caches(target, 1, 0, 0);
3152 else
3153 xscale_disable_mmu_caches(target, 1, 0, 0);
3154 xscale->armv4_5_mmu.mmu_enabled = enable;
3157 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3159 return ERROR_OK;
3162 COMMAND_HANDLER(xscale_handle_idcache_command)
3164 struct target *target = get_current_target(CMD_CTX);
3165 struct xscale_common *xscale = target_to_xscale(target);
3167 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3168 if (retval != ERROR_OK)
3169 return retval;
3171 if (target->state != TARGET_HALTED)
3173 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3174 return ERROR_OK;
3177 bool icache;
3178 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3180 if (CMD_ARGC >= 1)
3182 bool enable;
3183 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3184 if (enable)
3185 xscale_enable_mmu_caches(target, 0, !icache, icache);
3186 else
3187 xscale_disable_mmu_caches(target, 0, !icache, icache);
3188 if (icache)
3189 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3190 else
3191 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3194 bool enabled = icache ?
3195 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3196 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3197 const char *msg = enabled ? "enabled" : "disabled";
3198 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3200 return ERROR_OK;
3203 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3205 struct target *target = get_current_target(CMD_CTX);
3206 struct xscale_common *xscale = target_to_xscale(target);
3207 int retval;
3209 retval = xscale_verify_pointer(CMD_CTX, xscale);
3210 if (retval != ERROR_OK)
3211 return retval;
3213 if (CMD_ARGC < 1)
3215 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3217 else
3219 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3220 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3221 xscale_write_dcsr(target, -1, -1);
3224 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3226 return ERROR_OK;
3230 COMMAND_HANDLER(xscale_handle_vector_table_command)
3232 struct target *target = get_current_target(CMD_CTX);
3233 struct xscale_common *xscale = target_to_xscale(target);
3234 int err = 0;
3235 int retval;
3237 retval = xscale_verify_pointer(CMD_CTX, xscale);
3238 if (retval != ERROR_OK)
3239 return retval;
3241 if (CMD_ARGC == 0) /* print current settings */
3243 int idx;
3245 command_print(CMD_CTX, "active user-set static vectors:");
3246 for (idx = 1; idx < 8; idx++)
3247 if (xscale->static_low_vectors_set & (1 << idx))
3248 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3249 for (idx = 1; idx < 8; idx++)
3250 if (xscale->static_high_vectors_set & (1 << idx))
3251 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3252 return ERROR_OK;
3255 if (CMD_ARGC != 3)
3256 err = 1;
3257 else
3259 int idx;
3260 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3261 uint32_t vec;
3262 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3264 if (idx < 1 || idx >= 8)
3265 err = 1;
3267 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3269 xscale->static_low_vectors_set |= (1<<idx);
3270 xscale->static_low_vectors[idx] = vec;
3272 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3274 xscale->static_high_vectors_set |= (1<<idx);
3275 xscale->static_high_vectors[idx] = vec;
3277 else
3278 err = 1;
3281 if (err)
3282 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3284 return ERROR_OK;
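/* "xscale trace_buffer" toggles trace capture and selects the capture
 * mode: "fill [n]" stores a positive fill count in buffer_fill, "wrap"
 * stores -1. The low two bits of DCSR are then updated through
 * xscale_write_dcsr_sw -- cleared for wrap mode, set to 2 for fill mode.
 * In fill mode the current PC is recorded so trace analysis has a known
 * starting point; in wrap mode the start address is unknown.
 */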
3288 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3290 struct target *target = get_current_target(CMD_CTX);
3291 struct xscale_common *xscale = target_to_xscale(target);
3292 struct arm *armv4_5 = &xscale->armv4_5_common;
3293 uint32_t dcsr_value;
3294 int retval;
3296 retval = xscale_verify_pointer(CMD_CTX, xscale);
3297 if (retval != ERROR_OK)
3298 return retval;
3300 if (target->state != TARGET_HALTED)
3302 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3303 return ERROR_OK;
3306 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3308 struct xscale_trace_data *td, *next_td;
3309 xscale->trace.buffer_enabled = 1;
3311 /* free old trace data */
3312 td = xscale->trace.data;
3313 while (td)
3315 next_td = td->next;
3317 if (td->entries)
3318 free(td->entries);
3319 free(td);
3320 td = next_td;
3322 xscale->trace.data = NULL;
3324 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3326 xscale->trace.buffer_enabled = 0;
3329 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3331 uint32_t fill = 1;
3332 if (CMD_ARGC >= 3)
3333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3334 xscale->trace.buffer_fill = fill;
3336 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3338 xscale->trace.buffer_fill = -1;
3341 if (xscale->trace.buffer_enabled)
3343 /* if we enable the trace buffer in fill-once
3344 * mode we know the address of the first instruction */
3345 xscale->trace.pc_ok = 1;
3346 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3348 else
3350 /* otherwise the address is unknown, and we have no known good PC */
3351 xscale->trace.pc_ok = 0;
3354 command_print(CMD_CTX, "trace buffer %s (%s)",
3355 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3356 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3358 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3359 if (xscale->trace.buffer_fill >= 0)
3360 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3361 else
3362 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3364 return ERROR_OK;
3367 COMMAND_HANDLER(xscale_handle_trace_image_command)
3369 struct target *target = get_current_target(CMD_CTX);
3370 struct xscale_common *xscale = target_to_xscale(target);
3371 int retval;
3373 if (CMD_ARGC < 1)
3375 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3376 return ERROR_OK;
3379 retval = xscale_verify_pointer(CMD_CTX, xscale);
3380 if (retval != ERROR_OK)
3381 return retval;
3383 if (xscale->trace.image)
3385 image_close(xscale->trace.image);
3386 free(xscale->trace.image);
3387 command_print(CMD_CTX, "previously loaded image found and closed");
3390 xscale->trace.image = malloc(sizeof(struct image));
3391 xscale->trace.image->base_address_set = 0;
3392 xscale->trace.image->start_address_set = 0;
3394 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3395 if (CMD_ARGC >= 2)
3397 xscale->trace.image->base_address_set = 1;
3398 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3400 else
3402 xscale->trace.image->base_address_set = 0;
3405 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3407 free(xscale->trace.image);
3408 xscale->trace.image = NULL;
3409 return ERROR_OK;
3412 return ERROR_OK;
3415 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3417 struct target *target = get_current_target(CMD_CTX);
3418 struct xscale_common *xscale = target_to_xscale(target);
3419 struct xscale_trace_data *trace_data;
3420 struct fileio file;
3421 int retval;
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3427 if (target->state != TARGET_HALTED)
3429 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3430 return ERROR_OK;
3433 if (CMD_ARGC < 1)
3435 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3436 return ERROR_OK;
3439 trace_data = xscale->trace.data;
3441 if (!trace_data)
3443 command_print(CMD_CTX, "no trace data collected");
3444 return ERROR_OK;
3447 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3449 return ERROR_OK;
3452 while (trace_data)
3454 int i;
3456 fileio_write_u32(&file, trace_data->chkpt0);
3457 fileio_write_u32(&file, trace_data->chkpt1);
3458 fileio_write_u32(&file, trace_data->last_instruction);
3459 fileio_write_u32(&file, trace_data->depth);
3461 for (i = 0; i < trace_data->depth; i++)
3462 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3464 trace_data = trace_data->next;
3467 fileio_close(&file);
3469 return ERROR_OK;
3472 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3474 struct target *target = get_current_target(CMD_CTX);
3475 struct xscale_common *xscale = target_to_xscale(target);
3476 int retval;
3478 retval = xscale_verify_pointer(CMD_CTX, xscale);
3479 if (retval != ERROR_OK)
3480 return retval;
3482 xscale_analyze_trace(target, CMD_CTX);
3484 return ERROR_OK;
3487 COMMAND_HANDLER(xscale_handle_cp15)
3489 struct target *target = get_current_target(CMD_CTX);
3490 struct xscale_common *xscale = target_to_xscale(target);
3491 int retval;
3493 retval = xscale_verify_pointer(CMD_CTX, xscale);
3494 if (retval != ERROR_OK)
3495 return retval;
3497 if (target->state != TARGET_HALTED)
3499 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3500 return ERROR_OK;
3502 uint32_t reg_no = 0;
3503 struct reg *reg = NULL;
3504 if (CMD_ARGC > 0)
3506 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3507 /* translate from XScale cp15 register number to OpenOCD register */
3508 switch (reg_no)
3510 case 0:
3511 reg_no = XSCALE_MAINID;
3512 break;
3513 case 1:
3514 reg_no = XSCALE_CTRL;
3515 break;
3516 case 2:
3517 reg_no = XSCALE_TTB;
3518 break;
3519 case 3:
3520 reg_no = XSCALE_DAC;
3521 break;
3522 case 5:
3523 reg_no = XSCALE_FSR;
3524 break;
3525 case 6:
3526 reg_no = XSCALE_FAR;
3527 break;
3528 case 13:
3529 reg_no = XSCALE_PID;
3530 break;
3531 case 15:
3532 reg_no = XSCALE_CPACCESS;
3533 break;
3534 default:
3535 command_print(CMD_CTX, "invalid register number");
3536 return ERROR_INVALID_ARGUMENTS;
3538 reg = &xscale->reg_cache->reg_list[reg_no];
3541 if (CMD_ARGC == 1)
3543 uint32_t value;
3545 /* read the selected cp15 register */
3546 xscale_get_reg(reg);
3547 value = buf_get_u32(reg->value, 0, 32);
3548 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3550 else if (CMD_ARGC == 2)
3552 uint32_t value;
3553 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3555 /* send CP write request (command 0x41) */
3556 xscale_send_u32(target, 0x41);
3558 /* send CP register number */
3559 xscale_send_u32(target, reg_no);
3561 /* send CP register value */
3562 xscale_send_u32(target, value);
3564 /* execute cpwait to ensure outstanding operations complete */
3565 xscale_send_u32(target, 0x53);
3567 else
3569 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3572 return ERROR_OK;
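/* Command registration. The handlers above end up under the "xscale"
 * command group; a typical (purely illustrative) configuration sequence
 * would look like:
 *
 *   xscale debug_handler <target#> 0xfe000800
 *   xscale cache_clean_address <target#> 0xfffe0000
 *   xscale vector_catch 0x01
 *   xscale trace_buffer enable fill
 *
 * where the addresses and mask shown are simply the driver defaults.
 */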
3575 static const struct command_registration xscale_exec_command_handlers[] = {
3577 .name = "cache_info",
3578 .handler = &xscale_handle_cache_info_command,
3579 .mode = COMMAND_EXEC,
3583 .name = "mmu",
3584 .handler = &xscale_handle_mmu_command,
3585 .mode = COMMAND_EXEC,
3586 .usage = "[enable|disable]",
3587 .help = "enable or disable the MMU",
3590 .name = "icache",
3591 .handler = &xscale_handle_idcache_command,
3592 .mode = COMMAND_EXEC,
3593 .usage = "[enable|disable]",
3594 .help = "enable or disable the ICache",
3597 .name = "dcache",
3598 .handler = &xscale_handle_idcache_command,
3599 .mode = COMMAND_EXEC,
3600 .usage = "[enable|disable]",
3601 .help = "enable or disable the DCache",
3605 .name = "vector_catch",
3606 .handler = &xscale_handle_vector_catch_command,
3607 .mode = COMMAND_EXEC,
3608 .help = "mask of vectors that should be caught",
3609 .usage = "[<mask>]",
3612 .name = "vector_table",
3613 .handler = &xscale_handle_vector_table_command,
3614 .mode = COMMAND_EXEC,
3615 .usage = "<high|low> <index> <code>",
3616 .help = "set static code for exception handler entry",
3620 .name = "trace_buffer",
3621 .handler = &xscale_handle_trace_buffer_command,
3622 .mode = COMMAND_EXEC,
3623 .usage = "<enable | disable> [fill [n]|wrap]",
3626 .name = "dump_trace",
3627 .handler = &xscale_handle_dump_trace_command,
3628 .mode = COMMAND_EXEC,
3629 .help = "dump content of trace buffer to <file>",
3630 .usage = "<file>",
3633 .name = "analyze_trace",
3634 .handler = &xscale_handle_analyze_trace_buffer_command,
3635 .mode = COMMAND_EXEC,
3636 .help = "analyze content of trace buffer",
3639 .name = "trace_image",
3640 .handler = &xscale_handle_trace_image_command,
3641 .mode = COMMAND_EXEC,
3642 .help = "load image from <file> [base address]",
3643 .usage = "<file> [address] [type]",
3647 .name = "cp15",
3648 .handler = &xscale_handle_cp15,
3649 .mode = COMMAND_EXEC,
3650 .help = "access coproc 15",
3651 .usage = "<register> [value]",
3653 COMMAND_REGISTRATION_DONE
3655 static const struct command_registration xscale_any_command_handlers[] = {
3657 .name = "debug_handler",
3658 .handler = &xscale_handle_debug_handler_command,
3659 .mode = COMMAND_ANY,
3660 .usage = "<target#> <address>",
3663 .name = "cache_clean_address",
3664 .handler = &xscale_handle_cache_clean_address_command,
3665 .mode = COMMAND_ANY,
3668 .chain = xscale_exec_command_handlers,
3670 COMMAND_REGISTRATION_DONE
3672 static const struct command_registration xscale_command_handlers[] = {
3674 .chain = arm_command_handlers,
3677 .name = "xscale",
3678 .mode = COMMAND_ANY,
3679 .help = "xscale command group",
3680 .chain = xscale_any_command_handlers,
3682 COMMAND_REGISTRATION_DONE
3685 struct target_type xscale_target =
3687 .name = "xscale",
3689 .poll = xscale_poll,
3690 .arch_state = xscale_arch_state,
3692 .target_request_data = NULL,
3694 .halt = xscale_halt,
3695 .resume = xscale_resume,
3696 .step = xscale_step,
3698 .assert_reset = xscale_assert_reset,
3699 .deassert_reset = xscale_deassert_reset,
3700 .soft_reset_halt = NULL,
3702 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3704 .read_memory = xscale_read_memory,
3705 .read_phys_memory = xscale_read_phys_memory,
3706 .write_memory = xscale_write_memory,
3707 .write_phys_memory = xscale_write_phys_memory,
3708 .bulk_write_memory = xscale_bulk_write_memory,
3710 .checksum_memory = arm_checksum_memory,
3711 .blank_check_memory = arm_blank_check_memory,
3713 .run_algorithm = armv4_5_run_algorithm,
3715 .add_breakpoint = xscale_add_breakpoint,
3716 .remove_breakpoint = xscale_remove_breakpoint,
3717 .add_watchpoint = xscale_add_watchpoint,
3718 .remove_watchpoint = xscale_remove_watchpoint,
3720 .commands = xscale_command_handlers,
3721 .target_create = xscale_target_create,
3722 .init_target = xscale_init_target,
3724 .virt2phys = xscale_virt2phys,
3725 .mmu = xscale_mmu