ARM: keep a handle to the PC
src/target/xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
87 static char *const xscale_reg_list[] =
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
113 static const struct xscale_reg xscale_reg_arch_info[] =
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
142 uint8_t buf[4];
144 buf_set_u32(buf, 0, 32, value);
146 return xscale_set_reg(reg, buf);
149 static const char xscale_not[] = "target is not an XScale";
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
158 return ERROR_OK;
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
163 if (tap == NULL)
164 return ERROR_FAIL;
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
168 struct scan_field field;
169 uint8_t scratch[4];
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
180 return ERROR_OK;
183 static int xscale_read_dcsr(struct target *target)
185 struct xscale_common *xscale = target_to_xscale(target);
186 int retval;
187 struct scan_field fields[3];
188 uint8_t field0 = 0x0;
189 uint8_t field0_check_value = 0x2;
190 uint8_t field0_check_mask = 0x7;
191 uint8_t field2 = 0x0;
192 uint8_t field2_check_value = 0x0;
193 uint8_t field2_check_mask = 0x1;
195 jtag_set_end_state(TAP_DRPAUSE);
196 xscale_jtag_set_instr(target->tap,
197 XSCALE_SELDCSR << xscale->xscale_variant);
199 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
200 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
202 memset(&fields, 0, sizeof fields);
204 fields[0].tap = target->tap;
205 fields[0].num_bits = 3;
206 fields[0].out_value = &field0;
207 uint8_t tmp;
208 fields[0].in_value = &tmp;
210 fields[1].tap = target->tap;
211 fields[1].num_bits = 32;
212 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
214 fields[2].tap = target->tap;
215 fields[2].num_bits = 1;
216 fields[2].out_value = &field2;
217 uint8_t tmp2;
218 fields[2].in_value = &tmp2;
220 jtag_add_dr_scan(3, fields, jtag_get_end_state());
222 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
223 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
225 if ((retval = jtag_execute_queue()) != ERROR_OK)
227 LOG_ERROR("JTAG error while reading DCSR");
228 return retval;
231 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
232 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
234 /* write the register with the value we just read
235 * on this second pass, only the first bit of field0 is guaranteed to be 0 */
237 field0_check_mask = 0x1;
238 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
239 fields[1].in_value = NULL;
241 jtag_set_end_state(TAP_IDLE);
243 jtag_add_dr_scan(3, fields, jtag_get_end_state());
245 /* DANGER!!! this must be here. It will make sure that the arguments
246 * to jtag_set_check_value() do not go out of scope! */
247 return jtag_execute_queue();
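/* JTAG callback: converts a just-scanned word in place, from the raw
 * scan buffer layout into a host-endian uint32_t.  Scheduled via
 * jtag_add_callback() from xscale_receive() below. */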
251 static void xscale_getbuf(jtag_callback_data_t arg)
253 uint8_t *in = (uint8_t *)arg;
254 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
257 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
259 if (num_words == 0)
260 return ERROR_INVALID_ARGUMENTS;
262 struct xscale_common *xscale = target_to_xscale(target);
263 int retval = ERROR_OK;
264 tap_state_t path[3];
265 struct scan_field fields[3];
266 uint8_t *field0 = malloc(num_words * 1);
267 uint8_t field0_check_value = 0x2;
268 uint8_t field0_check_mask = 0x6;
269 uint32_t *field1 = malloc(num_words * 4);
270 uint8_t field2_check_value = 0x0;
271 uint8_t field2_check_mask = 0x1;
272 int words_done = 0;
273 int words_scheduled = 0;
274 int i;
276 path[0] = TAP_DRSELECT;
277 path[1] = TAP_DRCAPTURE;
278 path[2] = TAP_DRSHIFT;
280 memset(&fields, 0, sizeof fields);
282 fields[0].tap = target->tap;
283 fields[0].num_bits = 3;
284 fields[0].check_value = &field0_check_value;
285 fields[0].check_mask = &field0_check_mask;
287 fields[1].tap = target->tap;
288 fields[1].num_bits = 32;
290 fields[2].tap = target->tap;
291 fields[2].num_bits = 1;
292 fields[2].check_value = &field2_check_value;
293 fields[2].check_mask = &field2_check_mask;
295 jtag_set_end_state(TAP_IDLE);
296 xscale_jtag_set_instr(target->tap,
297 XSCALE_DBGTX << xscale->xscale_variant);
298 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
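/* Receive protocol: for each word the TAP is walked through
 * Select-DR/Capture-DR/Shift-DR and a 3 + 32 + 1 bit frame is scanned
 * from DBGTX.  Bit 0 of the 3-bit status field flags whether the debug
 * handler had a new word ready; words that come back without that flag
 * are discarded below and retried on the next pass of the outer loop. */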
300 /* repeat until all words have been collected */
301 int attempts = 0;
302 while (words_done < num_words)
304 /* schedule reads */
305 words_scheduled = 0;
306 for (i = words_done; i < num_words; i++)
308 fields[0].in_value = &field0[i];
310 jtag_add_pathmove(3, path);
312 fields[1].in_value = (uint8_t *)(field1 + i);
314 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
316 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
318 words_scheduled++;
321 if ((retval = jtag_execute_queue()) != ERROR_OK)
323 LOG_ERROR("JTAG error while receiving data from debug handler");
324 break;
327 /* examine results */
328 for (i = words_done; i < num_words; i++)
330 if (!(field0[i] & 1))
332 /* move backwards if necessary */
333 int j;
334 for (j = i; j < num_words - 1; j++)
336 field0[j] = field0[j + 1];
337 field1[j] = field1[j + 1];
339 words_scheduled--;
342 if (words_scheduled == 0)
344 if (attempts++ == 1000)
346 LOG_ERROR("Failed to receive data from the debug handler after 1000 attempts");
347 retval = ERROR_TARGET_TIMEOUT;
348 break;
352 words_done += words_scheduled;
355 for (i = 0; i < num_words; i++)
356 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
358 free(field1);
360 return retval;
363 static int xscale_read_tx(struct target *target, int consume)
365 struct xscale_common *xscale = target_to_xscale(target);
366 tap_state_t path[3];
367 tap_state_t noconsume_path[6];
368 int retval;
369 struct timeval timeout, now;
370 struct scan_field fields[3];
371 uint8_t field0_in = 0x0;
372 uint8_t field0_check_value = 0x2;
373 uint8_t field0_check_mask = 0x6;
374 uint8_t field2_check_value = 0x0;
375 uint8_t field2_check_mask = 0x1;
377 jtag_set_end_state(TAP_IDLE);
379 xscale_jtag_set_instr(target->tap,
380 XSCALE_DBGTX << xscale->xscale_variant);
382 path[0] = TAP_DRSELECT;
383 path[1] = TAP_DRCAPTURE;
384 path[2] = TAP_DRSHIFT;
386 noconsume_path[0] = TAP_DRSELECT;
387 noconsume_path[1] = TAP_DRCAPTURE;
388 noconsume_path[2] = TAP_DREXIT1;
389 noconsume_path[3] = TAP_DRPAUSE;
390 noconsume_path[4] = TAP_DREXIT2;
391 noconsume_path[5] = TAP_DRSHIFT;
393 memset(&fields, 0, sizeof fields);
395 fields[0].tap = target->tap;
396 fields[0].num_bits = 3;
397 fields[0].in_value = &field0_in;
399 fields[1].tap = target->tap;
400 fields[1].num_bits = 32;
401 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
403 fields[2].tap = target->tap;
404 fields[2].num_bits = 1;
405 uint8_t tmp;
406 fields[2].in_value = &tmp;
408 gettimeofday(&timeout, NULL);
409 timeval_add_time(&timeout, 1, 0);
411 for (;;)
413 /* if we want to consume the register content (i.e. clear TX_READY),
414 * we have to go straight from Capture-DR to Shift-DR;
415 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR */
417 if (consume)
418 jtag_add_pathmove(3, path);
419 else
421 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
424 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
426 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
427 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
429 if ((retval = jtag_execute_queue()) != ERROR_OK)
431 LOG_ERROR("JTAG error while reading TX");
432 return ERROR_TARGET_TIMEOUT;
435 gettimeofday(&now, NULL);
436 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
438 LOG_ERROR("time out reading TX register");
439 return ERROR_TARGET_TIMEOUT;
441 if (!((!(field0_in & 1)) && consume))
443 goto done;
445 if (debug_level >= 3)
447 LOG_DEBUG("waiting 100ms");
448 alive_sleep(100); /* avoid flooding the logs */
449 } else
451 keep_alive();
454 done:
456 if (!(field0_in & 1))
457 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
459 return ERROR_OK;
462 static int xscale_write_rx(struct target *target)
464 struct xscale_common *xscale = target_to_xscale(target);
465 int retval;
466 struct timeval timeout, now;
467 struct scan_field fields[3];
468 uint8_t field0_out = 0x0;
469 uint8_t field0_in = 0x0;
470 uint8_t field0_check_value = 0x2;
471 uint8_t field0_check_mask = 0x6;
472 uint8_t field2 = 0x0;
473 uint8_t field2_check_value = 0x0;
474 uint8_t field2_check_mask = 0x1;
476 jtag_set_end_state(TAP_IDLE);
478 xscale_jtag_set_instr(target->tap,
479 XSCALE_DBGRX << xscale->xscale_variant);
481 memset(&fields, 0, sizeof fields);
483 fields[0].tap = target->tap;
484 fields[0].num_bits = 3;
485 fields[0].out_value = &field0_out;
486 fields[0].in_value = &field0_in;
488 fields[1].tap = target->tap;
489 fields[1].num_bits = 32;
490 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
492 fields[2].tap = target->tap;
493 fields[2].num_bits = 1;
494 fields[2].out_value = &field2;
495 uint8_t tmp;
496 fields[2].in_value = &tmp;
498 gettimeofday(&timeout, NULL);
499 timeval_add_time(&timeout, 1, 0);
501 /* poll until rx_read is low */
502 LOG_DEBUG("polling RX");
503 for (;;)
505 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
507 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
508 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
510 if ((retval = jtag_execute_queue()) != ERROR_OK)
512 LOG_ERROR("JTAG error while writing RX");
513 return retval;
516 gettimeofday(&now, NULL);
517 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
519 LOG_ERROR("time out writing RX register");
520 return ERROR_TARGET_TIMEOUT;
522 if (!(field0_in & 1))
523 goto done;
524 if (debug_level >= 3)
526 LOG_DEBUG("waiting 100ms");
527 alive_sleep(100); /* avoid flooding the logs */
528 } else
530 keep_alive();
533 done:
535 /* set rx_valid */
536 field2 = 0x1;
537 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
539 if ((retval = jtag_execute_queue()) != ERROR_OK)
541 LOG_ERROR("JTAG error while writing RX");
542 return retval;
545 return ERROR_OK;
548 /* send count elements of size byte to the debug handler */
549 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
551 struct xscale_common *xscale = target_to_xscale(target);
552 uint32_t t[3];
553 int bits[3];
554 int retval;
555 int done_count = 0;
557 jtag_set_end_state(TAP_IDLE);
559 xscale_jtag_set_instr(target->tap,
560 XSCALE_DBGRX << xscale->xscale_variant);
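/* DR layout mirrors xscale_write_rx(): a 3-bit status field (t[0] = 0),
 * the 32-bit data word (t[1], filled in per element below), and one
 * flag bit (t[2] = 1), the same bit xscale_write_rx() sets as rx_valid. */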
562 bits[0] = 3;
563 t[0] = 0;
564 bits[1] = 32;
565 t[2] = 1;
566 bits[2] = 1;
567 int endianness = target->endianness;
568 while (done_count++ < count)
570 switch (size)
572 case 4:
573 if (endianness == TARGET_LITTLE_ENDIAN)
575 t[1] = le_to_h_u32(buffer);
576 } else
578 t[1] = be_to_h_u32(buffer);
580 break;
581 case 2:
582 if (endianness == TARGET_LITTLE_ENDIAN)
584 t[1] = le_to_h_u16(buffer);
585 } else
587 t[1] = be_to_h_u16(buffer);
589 break;
590 case 1:
591 t[1] = buffer[0];
592 break;
593 default:
594 LOG_ERROR("BUG: size neither 4, 2 nor 1");
595 return ERROR_INVALID_ARGUMENTS;
597 jtag_add_dr_out(target->tap,
598 3,
599 bits,
600 t,
601 jtag_set_end_state(TAP_IDLE));
602 buffer += size;
605 if ((retval = jtag_execute_queue()) != ERROR_OK)
607 LOG_ERROR("JTAG error while sending data to debug handler");
608 return retval;
611 return ERROR_OK;
614 static int xscale_send_u32(struct target *target, uint32_t value)
616 struct xscale_common *xscale = target_to_xscale(target);
618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
619 return xscale_write_rx(target);
622 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
624 struct xscale_common *xscale = target_to_xscale(target);
625 int retval;
626 struct scan_field fields[3];
627 uint8_t field0 = 0x0;
628 uint8_t field0_check_value = 0x2;
629 uint8_t field0_check_mask = 0x7;
630 uint8_t field2 = 0x0;
631 uint8_t field2_check_value = 0x0;
632 uint8_t field2_check_mask = 0x1;
634 if (hold_rst != -1)
635 xscale->hold_rst = hold_rst;
637 if (ext_dbg_brk != -1)
638 xscale->external_debug_break = ext_dbg_brk;
640 jtag_set_end_state(TAP_IDLE);
641 xscale_jtag_set_instr(target->tap,
642 XSCALE_SELDCSR << xscale->xscale_variant);
644 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
645 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
647 memset(&fields, 0, sizeof fields);
649 fields[0].tap = target->tap;
650 fields[0].num_bits = 3;
651 fields[0].out_value = &field0;
652 uint8_t tmp;
653 fields[0].in_value = &tmp;
655 fields[1].tap = target->tap;
656 fields[1].num_bits = 32;
657 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
659 fields[2].tap = target->tap;
660 fields[2].num_bits = 1;
661 fields[2].out_value = &field2;
662 uint8_t tmp2;
663 fields[2].in_value = &tmp2;
665 jtag_add_dr_scan(3, fields, jtag_get_end_state());
667 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
668 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
670 if ((retval = jtag_execute_queue()) != ERROR_OK)
672 LOG_ERROR("JTAG error while writing DCSR");
673 return retval;
676 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
677 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
679 return ERROR_OK;
682 /* parity of the number of set bits: 0 if even, 1 if odd (for 32-bit words) */
683 static unsigned int parity (unsigned int v)
685 // unsigned int ov = v;
686 v ^= v >> 16;
687 v ^= v >> 8;
688 v ^= v >> 4;
689 v &= 0xf;
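/* 0x6996 acts as a 16-entry lookup table: bit n of 0x6996
 * (binary 0110 1001 1001 0110) is the parity of the 4-bit value n,
 * so the folded nibble in v indexes straight into it. */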
690 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
691 return (0x6996 >> v) & 1;
694 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
696 struct xscale_common *xscale = target_to_xscale(target);
697 uint8_t packet[4];
698 uint8_t cmd;
699 int word;
700 struct scan_field fields[2];
702 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
704 /* LDIC into IR */
705 jtag_set_end_state(TAP_IDLE);
706 xscale_jtag_set_instr(target->tap,
707 XSCALE_LDIC << xscale->xscale_variant);
709 /* CMD is b011 to load a cacheline into the Mini ICache.
710 * Loading into the main ICache is deprecated, and unused.
711 * It's followed by three zero bits, and 27 address bits.
713 buf_set_u32(&cmd, 0, 6, 0x3);
715 /* virtual address of desired cache line */
716 buf_set_u32(packet, 0, 27, va >> 5);
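/* a mini-ICache line holds 8 words (32 bytes), so only VA[31:5]
 * -- 27 bits -- identifies the line; the low 5 bits are dropped. */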
718 memset(&fields, 0, sizeof fields);
720 fields[0].tap = target->tap;
721 fields[0].num_bits = 6;
722 fields[0].out_value = &cmd;
724 fields[1].tap = target->tap;
725 fields[1].num_bits = 27;
726 fields[1].out_value = packet;
728 jtag_add_dr_scan(2, fields, jtag_get_end_state());
730 /* rest of packet is a cacheline: 8 instructions, with parity */
731 fields[0].num_bits = 32;
732 fields[0].out_value = packet;
734 fields[1].num_bits = 1;
735 fields[1].out_value = &cmd;
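/* the scan fields are reused for the payload: each of the 8 words goes
 * out as 32 instruction bits (fields[0]) followed by one parity bit
 * (fields[1], computed below). */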
737 for (word = 0; word < 8; word++)
739 buf_set_u32(packet, 0, 32, buffer[word]);
741 uint32_t value;
742 memcpy(&value, packet, sizeof(uint32_t));
743 cmd = parity(value);
745 jtag_add_dr_scan(2, fields, jtag_get_end_state());
748 return jtag_execute_queue();
751 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
753 struct xscale_common *xscale = target_to_xscale(target);
754 uint8_t packet[4];
755 uint8_t cmd;
756 struct scan_field fields[2];
758 jtag_set_end_state(TAP_IDLE);
759 xscale_jtag_set_instr(target->tap,
760 XSCALE_LDIC << xscale->xscale_variant);
762 /* CMD for invalidate IC line b000, bits [6:4] b000 */
763 buf_set_u32(&cmd, 0, 6, 0x0);
765 /* virtual address of desired cache line */
766 buf_set_u32(packet, 0, 27, va >> 5);
768 memset(&fields, 0, sizeof fields);
770 fields[0].tap = target->tap;
771 fields[0].num_bits = 6;
772 fields[0].out_value = &cmd;
774 fields[1].tap = target->tap;
775 fields[1].num_bits = 27;
776 fields[1].out_value = packet;
778 jtag_add_dr_scan(2, fields, jtag_get_end_state());
780 return ERROR_OK;
783 static int xscale_update_vectors(struct target *target)
785 struct xscale_common *xscale = target_to_xscale(target);
786 int i;
787 int retval;
789 uint32_t low_reset_branch, high_reset_branch;
791 for (i = 1; i < 8; i++)
793 /* if there's a static vector specified for this exception, override */
794 if (xscale->static_high_vectors_set & (1 << i))
796 xscale->high_vectors[i] = xscale->static_high_vectors[i];
798 else
800 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
801 if (retval == ERROR_TARGET_TIMEOUT)
802 return retval;
803 if (retval != ERROR_OK)
805 /* Some of these reads will fail as part of normal execution */
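/* ARMV4_5_B(0xfffffe, 0) is a branch with word offset -2; with the
 * PC + 8 pipeline offset it branches to itself, parking the core
 * harmlessly if this vector is ever taken. */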
806 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
811 for (i = 1; i < 8; i++)
813 if (xscale->static_low_vectors_set & (1 << i))
815 xscale->low_vectors[i] = xscale->static_low_vectors[i];
817 else
819 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
820 if (retval == ERROR_TARGET_TIMEOUT)
821 return retval;
822 if (retval != ERROR_OK)
824 /* Some of these reads will fail as part of normal execution */
825 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
830 /* calculate branches to debug handler */
831 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
832 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
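/* standard ARM B encoding: word offset = (destination - vector - 8) >> 2,
 * the -8 accounting for the pipeline (PC reads as instruction + 8).
 * Both reset vectors thus branch into the debug handler entry at
 * handler_address + 0x20. */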
834 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
835 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
837 /* invalidate and load exception vectors in mini i-cache */
838 xscale_invalidate_ic_line(target, 0x0);
839 xscale_invalidate_ic_line(target, 0xffff0000);
841 xscale_load_ic(target, 0x0, xscale->low_vectors);
842 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
844 return ERROR_OK;
847 static int xscale_arch_state(struct target *target)
849 struct xscale_common *xscale = target_to_xscale(target);
850 struct arm *armv4_5 = &xscale->armv4_5_common;
852 static const char *state[] =
854 "disabled", "enabled"
857 static const char *arch_dbg_reason[] =
859 "", "\n(processor reset)", "\n(trace buffer full)"
862 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
864 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
865 return ERROR_INVALID_ARGUMENTS;
868 arm_arch_state(target);
869 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
870 state[xscale->armv4_5_mmu.mmu_enabled],
871 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
872 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
873 arch_dbg_reason[xscale->arch_debug_reason]);
875 return ERROR_OK;
878 static int xscale_poll(struct target *target)
880 int retval = ERROR_OK;
882 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
884 enum target_state previous_state = target->state;
885 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
888 /* there's data to read from the tx register, we entered debug state */
889 target->state = TARGET_HALTED;
891 /* process debug entry, fetching current mode regs */
892 retval = xscale_debug_entry(target);
894 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
896 LOG_USER("error while polling TX register, reset CPU");
897 /* here we "lie" so GDB won't get stuck and a reset can be performed */
898 target->state = TARGET_HALTED;
901 /* debug_entry could have overwritten target state (i.e. immediate resume)
902 * don't signal event handlers in that case
904 if (target->state != TARGET_HALTED)
905 return ERROR_OK;
907 /* if target was running, signal that we halted
908 * otherwise we reentered from debug execution */
909 if (previous_state == TARGET_RUNNING)
910 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
911 else
912 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
915 return retval;
918 static int xscale_debug_entry(struct target *target)
920 struct xscale_common *xscale = target_to_xscale(target);
921 struct arm *armv4_5 = &xscale->armv4_5_common;
922 uint32_t pc;
923 uint32_t buffer[10];
924 int i;
925 int retval;
926 uint32_t moe;
928 /* clear external dbg break (will be written on next DCSR read) */
929 xscale->external_debug_break = 0;
930 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
931 return retval;
933 /* get r0, pc, r1 to r7 and cpsr */
934 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
935 return retval;
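/* note: values captured at debug entry are flagged dirty below;
 * xscale_resume()/xscale_step_inner() always hand r0-r7, the PC and
 * the CPSR back to the debug handler when execution restarts. */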
937 /* move r0 from buffer to register cache */
938 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
939 armv4_5->core_cache->reg_list[0].dirty = 1;
940 armv4_5->core_cache->reg_list[0].valid = 1;
941 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
943 /* move pc from buffer to register cache */
944 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
945 armv4_5->pc->dirty = 1;
946 armv4_5->pc->valid = 1;
947 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
949 /* move data from buffer to register cache */
950 for (i = 1; i <= 7; i++)
952 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
953 armv4_5->core_cache->reg_list[i].dirty = 1;
954 armv4_5->core_cache->reg_list[i].valid = 1;
955 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
958 arm_set_cpsr(armv4_5, buffer[9]);
959 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
961 if (!is_arm_mode(armv4_5->core_mode))
963 target->state = TARGET_UNKNOWN;
964 LOG_ERROR("cpsr contains invalid mode value - communication failure");
965 return ERROR_TARGET_FAILURE;
967 LOG_DEBUG("target entered debug state in %s mode",
968 arm_mode_name(armv4_5->core_mode));
970 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
971 if (armv4_5->spsr) {
972 xscale_receive(target, buffer, 8);
973 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
974 armv4_5->spsr->dirty = false;
975 armv4_5->spsr->valid = true;
977 else
979 /* r8 to r14, but no spsr */
980 xscale_receive(target, buffer, 7);
983 /* move data from buffer to right banked register in cache */
984 for (i = 8; i <= 14; i++)
986 struct reg *r = arm_reg_current(armv4_5, i);
988 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
989 r->dirty = false;
990 r->valid = true;
993 /* examine debug reason */
994 xscale_read_dcsr(target);
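/* DCSR bits [4:2] hold the method-of-entry (MOE) code decoded below */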
995 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
997 /* stored PC (for calculating fixup) */
998 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1000 switch (moe)
1002 case 0x0: /* Processor reset */
1003 target->debug_reason = DBG_REASON_DBGRQ;
1004 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1005 pc -= 4;
1006 break;
1007 case 0x1: /* Instruction breakpoint hit */
1008 target->debug_reason = DBG_REASON_BREAKPOINT;
1009 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1010 pc -= 4;
1011 break;
1012 case 0x2: /* Data breakpoint hit */
1013 target->debug_reason = DBG_REASON_WATCHPOINT;
1014 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1015 pc -= 4;
1016 break;
1017 case 0x3: /* BKPT instruction executed */
1018 target->debug_reason = DBG_REASON_BREAKPOINT;
1019 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1020 pc -= 4;
1021 break;
1022 case 0x4: /* Ext. debug event */
1023 target->debug_reason = DBG_REASON_DBGRQ;
1024 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1025 pc -= 4;
1026 break;
1027 case 0x5: /* Vector trap occurred */
1028 target->debug_reason = DBG_REASON_BREAKPOINT;
1029 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1030 pc -= 4;
1031 break;
1032 case 0x6: /* Trace buffer full break */
1033 target->debug_reason = DBG_REASON_DBGRQ;
1034 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1035 pc -= 4;
1036 break;
1037 case 0x7: /* Reserved (may flag Hot-Debug support) */
1038 default:
1039 LOG_ERROR("Method of Entry is 'Reserved'");
1040 exit(-1);
1041 break;
1044 /* apply PC fixup */
1045 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1047 /* on the first debug entry, identify cache type */
1048 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1050 uint32_t cache_type_reg;
1052 /* read cp15 cache type register */
1053 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1054 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1056 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1059 /* examine MMU and Cache settings */
1060 /* read cp15 control register */
1061 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1062 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1063 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1064 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1065 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1067 /* tracing enabled, read collected trace data */
1068 if (xscale->trace.buffer_enabled)
1070 xscale_read_trace(target);
1071 xscale->trace.buffer_fill--;
1073 /* resume if we're still collecting trace data */
1074 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1075 && (xscale->trace.buffer_fill > 0))
1077 xscale_resume(target, 1, 0x0, 1, 0);
1079 else
1081 xscale->trace.buffer_enabled = 0;
1085 return ERROR_OK;
1088 static int xscale_halt(struct target *target)
1090 struct xscale_common *xscale = target_to_xscale(target);
1092 LOG_DEBUG("target->state: %s",
1093 target_state_name(target));
1095 if (target->state == TARGET_HALTED)
1097 LOG_DEBUG("target was already halted");
1098 return ERROR_OK;
1100 else if (target->state == TARGET_UNKNOWN)
1102 /* this must not happen for an XScale target */
1103 LOG_ERROR("target was in unknown state when halt was requested");
1104 return ERROR_TARGET_INVALID;
1106 else if (target->state == TARGET_RESET)
1108 LOG_DEBUG("target->state == TARGET_RESET");
1110 else
1112 /* assert external dbg break */
1113 xscale->external_debug_break = 1;
1114 xscale_read_dcsr(target);
1116 target->debug_reason = DBG_REASON_DBGRQ;
1119 return ERROR_OK;
1122 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1124 struct xscale_common *xscale = target_to_xscale(target);
1125 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1126 int retval;
1128 if (xscale->ibcr0_used)
1130 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1132 if (ibcr0_bp)
1134 xscale_unset_breakpoint(target, ibcr0_bp);
1136 else
1138 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1139 exit(-1);
1143 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1144 return retval;
1146 return ERROR_OK;
1149 static int xscale_disable_single_step(struct target *target)
1151 struct xscale_common *xscale = target_to_xscale(target);
1152 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1153 int retval;
1155 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1156 return retval;
1158 return ERROR_OK;
1161 static void xscale_enable_watchpoints(struct target *target)
1163 struct watchpoint *watchpoint = target->watchpoints;
1165 while (watchpoint)
1167 if (watchpoint->set == 0)
1168 xscale_set_watchpoint(target, watchpoint);
1169 watchpoint = watchpoint->next;
1173 static void xscale_enable_breakpoints(struct target *target)
1175 struct breakpoint *breakpoint = target->breakpoints;
1177 /* set any pending breakpoints */
1178 while (breakpoint)
1180 if (breakpoint->set == 0)
1181 xscale_set_breakpoint(target, breakpoint);
1182 breakpoint = breakpoint->next;
1186 static int xscale_resume(struct target *target, int current,
1187 uint32_t address, int handle_breakpoints, int debug_execution)
1189 struct xscale_common *xscale = target_to_xscale(target);
1190 struct arm *armv4_5 = &xscale->armv4_5_common;
1191 struct breakpoint *breakpoint = target->breakpoints;
1192 uint32_t current_pc;
1193 int retval;
1194 int i;
1196 LOG_DEBUG("-");
1198 if (target->state != TARGET_HALTED)
1200 LOG_WARNING("target not halted");
1201 return ERROR_TARGET_NOT_HALTED;
1204 if (!debug_execution)
1206 target_free_all_working_areas(target);
1209 /* update vector tables */
1210 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1211 return retval;
1213 /* current = 1: continue on current pc, otherwise continue at <address> */
1214 if (!current)
1215 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1217 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1219 /* if we're at the reset vector, we have to simulate the branch */
1220 if (current_pc == 0x0)
1222 arm_simulate_step(target, NULL);
1223 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1226 /* the front-end may request us not to handle breakpoints */
1227 if (handle_breakpoints)
1229 breakpoint = breakpoint_find(target,
1230 buf_get_u32(armv4_5->pc->value, 0, 32));
1231 if (breakpoint != NULL)
1233 uint32_t next_pc;
1235 /* there's a breakpoint at the current PC, we have to step over it */
1236 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1237 xscale_unset_breakpoint(target, breakpoint);
1239 /* calculate PC of next instruction */
1240 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1242 uint32_t current_opcode;
1243 target_read_u32(target, current_pc, &current_opcode);
1244 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1247 LOG_DEBUG("enable single-step");
1248 xscale_enable_single_step(target, next_pc);
1250 /* restore banked registers */
1251 retval = xscale_restore_banked(target);
1253 /* send resume request (command 0x30 or 0x31)
1254 * clean the trace buffer if it is to be enabled (0x62) */
1255 if (xscale->trace.buffer_enabled)
1257 xscale_send_u32(target, 0x62);
1258 xscale_send_u32(target, 0x31);
1260 else
1261 xscale_send_u32(target, 0x30);
1263 /* send CPSR */
1264 xscale_send_u32(target,
1265 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1266 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1267 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1269 for (i = 7; i >= 0; i--)
1271 /* send register */
1272 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1273 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1276 /* send PC */
1277 xscale_send_u32(target,
1278 buf_get_u32(armv4_5->pc->value, 0, 32));
1279 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1280 buf_get_u32(armv4_5->pc->value, 0, 32));
1282 /* wait for and process debug entry */
1283 xscale_debug_entry(target);
1285 LOG_DEBUG("disable single-step");
1286 xscale_disable_single_step(target);
1288 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1289 xscale_set_breakpoint(target, breakpoint);
1293 /* enable any pending breakpoints and watchpoints */
1294 xscale_enable_breakpoints(target);
1295 xscale_enable_watchpoints(target);
1297 /* restore banked registers */
1298 retval = xscale_restore_banked(target);
1300 /* send resume request (command 0x30 or 0x31)
1301 * clean the trace buffer if it is to be enabled (0x62) */
1302 if (xscale->trace.buffer_enabled)
1304 xscale_send_u32(target, 0x62);
1305 xscale_send_u32(target, 0x31);
1307 else
1308 xscale_send_u32(target, 0x30);
1310 /* send CPSR */
1311 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1312 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1313 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1315 for (i = 7; i >= 0; i--)
1317 /* send register */
1318 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1319 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1322 /* send PC */
1323 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1324 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1325 buf_get_u32(armv4_5->pc->value, 0, 32));
1327 target->debug_reason = DBG_REASON_NOTHALTED;
1329 if (!debug_execution)
1331 /* registers are now invalid */
1332 register_cache_invalidate(armv4_5->core_cache);
1333 target->state = TARGET_RUNNING;
1334 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1336 else
1338 target->state = TARGET_DEBUG_RUNNING;
1339 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1342 LOG_DEBUG("target resumed");
1344 return ERROR_OK;
1347 static int xscale_step_inner(struct target *target, int current,
1348 uint32_t address, int handle_breakpoints)
1350 struct xscale_common *xscale = target_to_xscale(target);
1351 struct arm *armv4_5 = &xscale->armv4_5_common;
1352 uint32_t next_pc;
1353 int retval;
1354 int i;
1356 target->debug_reason = DBG_REASON_SINGLESTEP;
1358 /* calculate PC of next instruction */
1359 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1361 uint32_t current_opcode, current_pc;
1362 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1364 target_read_u32(target, current_pc, &current_opcode);
1365 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1366 return retval;
1369 LOG_DEBUG("enable single-step");
1370 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1371 return retval;
1373 /* restore banked registers */
1374 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1375 return retval;
1377 /* send resume request (command 0x30 or 0x31)
1378 * clean the trace buffer if it is to be enabled (0x62) */
1379 if (xscale->trace.buffer_enabled)
1381 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1382 return retval;
1383 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1384 return retval;
1386 else
1387 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1388 return retval;
1390 /* send CPSR */
1391 retval = xscale_send_u32(target,
1392 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1393 if (retval != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1396 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1398 for (i = 7; i >= 0; i--)
1400 /* send register */
1401 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1402 return retval;
1403 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1406 /* send PC */
1407 retval = xscale_send_u32(target,
1408 buf_get_u32(armv4_5->pc->value, 0, 32));
1409 if (retval != ERROR_OK)
1410 return retval;
1411 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1412 buf_get_u32(armv4_5->pc->value, 0, 32));
1414 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1416 /* registers are now invalid */
1417 register_cache_invalidate(armv4_5->core_cache);
1419 /* wait for and process debug entry */
1420 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1421 return retval;
1423 LOG_DEBUG("disable single-step");
1424 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1425 return retval;
1427 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1429 return ERROR_OK;
1432 static int xscale_step(struct target *target, int current,
1433 uint32_t address, int handle_breakpoints)
1435 struct arm *armv4_5 = target_to_arm(target);
1436 struct breakpoint *breakpoint = NULL;
1438 uint32_t current_pc;
1439 int retval;
1441 if (target->state != TARGET_HALTED)
1443 LOG_WARNING("target not halted");
1444 return ERROR_TARGET_NOT_HALTED;
1447 /* current = 1: continue on current pc, otherwise continue at <address> */
1448 if (!current)
1449 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1451 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1453 /* if we're at the reset vector, we have to simulate the step */
1454 if (current_pc == 0x0)
1456 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1457 return retval;
1458 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1460 target->debug_reason = DBG_REASON_SINGLESTEP;
1461 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1463 return ERROR_OK;
1466 /* the front-end may request us not to handle breakpoints */
1467 if (handle_breakpoints)
1468 breakpoint = breakpoint_find(target,
1469 buf_get_u32(armv4_5->pc->value, 0, 32));
1470 if (breakpoint != NULL) {
1471 retval = xscale_unset_breakpoint(target, breakpoint);
1472 if (retval != ERROR_OK)
1473 return retval;
1476 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1478 if (breakpoint)
1480 xscale_set_breakpoint(target, breakpoint);
1483 LOG_DEBUG("target stepped");
1485 return ERROR_OK;
1489 static int xscale_assert_reset(struct target *target)
1491 struct xscale_common *xscale = target_to_xscale(target);
1493 LOG_DEBUG("target->state: %s",
1494 target_state_name(target));
1496 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1497 * end up in T-L-R, which would reset JTAG
1499 jtag_set_end_state(TAP_IDLE);
1500 xscale_jtag_set_instr(target->tap,
1501 XSCALE_SELDCSR << xscale->xscale_variant);
1503 /* set Hold reset, Halt mode and Trap Reset */
1504 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1505 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1506 xscale_write_dcsr(target, 1, 0);
1508 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1509 xscale_jtag_set_instr(target->tap, ~0);
1510 jtag_execute_queue();
1512 /* assert reset */
1513 jtag_add_reset(0, 1);
1515 /* sleep 1ms, to be sure we fulfill any requirements */
1516 jtag_add_sleep(1000);
1517 jtag_execute_queue();
1519 target->state = TARGET_RESET;
1521 if (target->reset_halt)
1523 int retval;
1524 if ((retval = target_halt(target)) != ERROR_OK)
1525 return retval;
1528 return ERROR_OK;
1531 static int xscale_deassert_reset(struct target *target)
1533 struct xscale_common *xscale = target_to_xscale(target);
1534 struct breakpoint *breakpoint = target->breakpoints;
1536 LOG_DEBUG("-");
1538 xscale->ibcr_available = 2;
1539 xscale->ibcr0_used = 0;
1540 xscale->ibcr1_used = 0;
1542 xscale->dbr_available = 2;
1543 xscale->dbr0_used = 0;
1544 xscale->dbr1_used = 0;
1546 /* mark all hardware breakpoints as unset */
1547 while (breakpoint)
1549 if (breakpoint->type == BKPT_HARD)
1551 breakpoint->set = 0;
1553 breakpoint = breakpoint->next;
1556 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1558 /* FIXME: mark hardware watchpoints as unset too. Also,
1559 * at least some of the XScale registers are invalid...
1563 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1564 * contents got invalidated. Safer to force that, so writing new
1565 * contents can't ever fail..
1568 uint32_t address;
1569 unsigned buf_cnt;
1570 const uint8_t *buffer = xscale_debug_handler;
1571 int retval;
1573 /* release SRST */
1574 jtag_add_reset(0, 0);
1576 /* wait 300ms; 150 and 100ms were not enough */
1577 jtag_add_sleep(300*1000);
1579 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1580 jtag_execute_queue();
1582 /* set Hold reset, Halt mode and Trap Reset */
1583 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1584 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1585 xscale_write_dcsr(target, 1, 0);
1587 /* Load the debug handler into the mini-icache. Since
1588 * it's using halt mode (not monitor mode), it runs in
1589 * "Special Debug State" for access to registers, memory,
1590 * coprocessors, trace data, etc.
1592 address = xscale->handler_address;
1593 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1594 binary_size > 0;
1595 binary_size -= buf_cnt, buffer += buf_cnt)
1597 uint32_t cache_line[8];
1598 unsigned i;
1600 buf_cnt = binary_size;
1601 if (buf_cnt > 32)
1602 buf_cnt = 32;
1604 for (i = 0; i < buf_cnt; i += 4)
1606 /* convert LE buffer to host-endian uint32_t */
1607 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1610 for (; i < 32; i += 4)
1612 cache_line[i / 4] = 0xe1a08008;
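/* 0xe1a08008 is "mov r8, r8", a harmless filler for the unused tail
 * of the final cache line. */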
1615 /* only load addresses other than the reset vectors */
1616 if ((address % 0x400) != 0x0)
1618 retval = xscale_load_ic(target, address,
1619 cache_line);
1620 if (retval != ERROR_OK)
1621 return retval;
1624 address += buf_cnt;
1627 retval = xscale_load_ic(target, 0x0,
1628 xscale->low_vectors);
1629 if (retval != ERROR_OK)
1630 return retval;
1631 retval = xscale_load_ic(target, 0xffff0000,
1632 xscale->high_vectors);
1633 if (retval != ERROR_OK)
1634 return retval;
1636 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1638 jtag_add_sleep(100000);
1640 /* set Hold reset, Halt mode and Trap Reset */
1641 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1642 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1643 xscale_write_dcsr(target, 1, 0);
1645 /* clear Hold reset to let the target run (should enter debug handler) */
1646 xscale_write_dcsr(target, 0, 1);
1647 target->state = TARGET_RUNNING;
1649 if (!target->reset_halt)
1651 jtag_add_sleep(10000);
1653 /* we should have entered debug now */
1654 xscale_debug_entry(target);
1655 target->state = TARGET_HALTED;
1657 /* resume the target */
1658 xscale_resume(target, 1, 0x0, 1, 0);
1662 return ERROR_OK;
1665 static int xscale_read_core_reg(struct target *target, struct reg *r,
1666 int num, enum arm_mode mode)
1668 /** \todo add debug handler support for core register reads */
1669 LOG_ERROR("not implemented");
1670 return ERROR_OK;
1673 static int xscale_write_core_reg(struct target *target, struct reg *r,
1674 int num, enum arm_mode mode, uint32_t value)
1676 /** \todo add debug handler support for core register writes */
1677 LOG_ERROR("not implemented");
1678 return ERROR_OK;
1681 static int xscale_full_context(struct target *target)
1683 struct arm *armv4_5 = target_to_arm(target);
1685 uint32_t *buffer;
1687 int i, j;
1689 LOG_DEBUG("-");
1691 if (target->state != TARGET_HALTED)
1693 LOG_WARNING("target not halted");
1694 return ERROR_TARGET_NOT_HALTED;
1697 buffer = malloc(4 * 8);
1699 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1700 * we can't enter User mode on an XScale (unpredictable),
1701 * but User shares registers with SYS
1703 for (i = 1; i < 7; i++)
1705 enum arm_mode mode = armv4_5_number_to_mode(i);
1706 bool valid = true;
1707 struct reg *r;
1709 if (mode == ARM_MODE_USR)
1710 continue;
1712 /* check if there are invalid registers in the current mode
1714 for (j = 0; valid && j <= 16; j++)
1716 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1717 mode, j).valid)
1718 valid = false;
1720 if (valid)
1721 continue;
1723 /* request banked registers */
1724 xscale_send_u32(target, 0x0);
1726 /* send CPSR for desired bank mode */
1727 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1729 /* get banked registers: r8 to r14; and SPSR
1730 * except in USR/SYS mode
1732 if (mode != ARM_MODE_SYS) {
1733 /* SPSR */
1734 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1735 mode, 16);
1737 xscale_receive(target, buffer, 8);
1739 buf_set_u32(r->value, 0, 32, buffer[7]);
1740 r->dirty = false;
1741 r->valid = true;
1742 } else {
1743 xscale_receive(target, buffer, 7);
1746 /* move data from buffer to register cache */
1747 for (j = 8; j <= 14; j++)
1749 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1750 mode, j);
1752 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1753 r->dirty = false;
1754 r->valid = true;
1758 free(buffer);
1760 return ERROR_OK;
1763 static int xscale_restore_banked(struct target *target)
1765 struct arm *armv4_5 = target_to_arm(target);
1767 int i, j;
1769 if (target->state != TARGET_HALTED)
1771 LOG_WARNING("target not halted");
1772 return ERROR_TARGET_NOT_HALTED;
1775 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1776 * and check if any banked registers need to be written. Ignore
1777 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1778 * an XScale (unpredictable), but they share all registers.
1780 for (i = 1; i < 7; i++)
1782 enum arm_mode mode = armv4_5_number_to_mode(i);
1783 struct reg *r;
1785 if (mode == ARM_MODE_USR)
1786 continue;
1788 /* check if there are dirty registers in this mode */
1789 for (j = 8; j <= 14; j++)
1791 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1792 mode, j).dirty)
1793 goto dirty;
1796 /* if not USR/SYS, check if the SPSR needs to be written */
1797 if (mode != ARM_MODE_SYS)
1799 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1800 mode, 16).dirty)
1801 goto dirty;
1804 /* there's nothing to flush for this mode */
1805 continue;
1807 dirty:
1808 /* command 0x1: "send banked registers" */
1809 xscale_send_u32(target, 0x1);
1811 /* send CPSR for desired mode */
1812 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1814 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1815 * but this protocol doesn't understand that nuance.
1817 for (j = 8; j <= 14; j++) {
1818 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1819 mode, j);
1820 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1821 r->dirty = false;
1824 /* send spsr if not in USR/SYS mode */
1825 if (mode != ARM_MODE_SYS) {
1826 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1827 mode, 16);
1828 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1829 r->dirty = false;
1833 return ERROR_OK;
1836 static int xscale_read_memory(struct target *target, uint32_t address,
1837 uint32_t size, uint32_t count, uint8_t *buffer)
1839 struct xscale_common *xscale = target_to_xscale(target);
1840 uint32_t *buf32;
1841 uint32_t i;
1842 int retval;
1844 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1846 if (target->state != TARGET_HALTED)
1848 LOG_WARNING("target not halted");
1849 return ERROR_TARGET_NOT_HALTED;
1852 /* sanitize arguments */
1853 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1854 return ERROR_INVALID_ARGUMENTS;
1856 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1857 return ERROR_TARGET_UNALIGNED_ACCESS;
1859 /* send memory read request (command 0x1n, n: access size) */
1860 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1861 return retval;
1863 /* send base address for read request */
1864 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1865 return retval;
1867 /* send number of requested data words */
1868 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1869 return retval;
1871 /* receive data from target (count times 32-bit words in host endianness) */
1872 buf32 = malloc(4 * count);
1873 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1874 return retval;
1876 /* extract data from host-endian buffer into byte stream */
1877 for (i = 0; i < count; i++)
1879 switch (size)
1881 case 4:
1882 target_buffer_set_u32(target, buffer, buf32[i]);
1883 buffer += 4;
1884 break;
1885 case 2:
1886 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1887 buffer += 2;
1888 break;
1889 case 1:
1890 *buffer++ = buf32[i] & 0xff;
1891 break;
1892 default:
1893 LOG_ERROR("invalid read size");
1894 return ERROR_INVALID_ARGUMENTS;
1898 free(buf32);
1900 /* examine DCSR, to see if Sticky Abort (SA) got set */
1901 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1902 return retval;
1903 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1905 /* clear SA bit */
1906 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1907 return retval;
1909 return ERROR_TARGET_DATA_ABORT;
1912 return ERROR_OK;
1915 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1916 uint32_t size, uint32_t count, uint8_t *buffer)
1918 struct xscale_common *xscale = target_to_xscale(target);
1920 /* with MMU inactive, there are only physical addresses */
1921 if (!xscale->armv4_5_mmu.mmu_enabled)
1922 return xscale_read_memory(target, address, size, count, buffer);
1924 /** \todo: provide a non-stub implementation of this routine. */
1925 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1926 target_name(target), __func__);
1927 return ERROR_FAIL;
1930 static int xscale_write_memory(struct target *target, uint32_t address,
1931 uint32_t size, uint32_t count, uint8_t *buffer)
1933 struct xscale_common *xscale = target_to_xscale(target);
1934 int retval;
1936 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1938 if (target->state != TARGET_HALTED)
1940 LOG_WARNING("target not halted");
1941 return ERROR_TARGET_NOT_HALTED;
1944 /* sanitize arguments */
1945 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1946 return ERROR_INVALID_ARGUMENTS;
1948 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1949 return ERROR_TARGET_UNALIGNED_ACCESS;
1951 /* send memory write request (command 0x2n, n: access size) */
1952 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1953 return retval;
1955 /* send base address for write request */
1956 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1957 return retval;
1959 /* send number of requested data words to be written */
1960 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1961 return retval;
1963 /* extract data from host-endian buffer into byte stream */
1964 #if 0
1965 for (i = 0; i < count; i++)
1967 switch (size)
1969 case 4:
1970 value = target_buffer_get_u32(target, buffer);
1971 xscale_send_u32(target, value);
1972 buffer += 4;
1973 break;
1974 case 2:
1975 value = target_buffer_get_u16(target, buffer);
1976 xscale_send_u32(target, value);
1977 buffer += 2;
1978 break;
1979 case 1:
1980 value = *buffer;
1981 xscale_send_u32(target, value);
1982 buffer += 1;
1983 break;
1984 default:
1985 LOG_ERROR("should never get here");
1986 exit(-1);
1989 #endif
1990 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1991 return retval;
1993 /* examine DCSR, to see if Sticky Abort (SA) got set */
1994 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1995 return retval;
1996 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1998 /* clear SA bit */
1999 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2000 return retval;
2002 return ERROR_TARGET_DATA_ABORT;
2005 return ERROR_OK;
2008 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2009 uint32_t size, uint32_t count, uint8_t *buffer)
2011 struct xscale_common *xscale = target_to_xscale(target);
2013 /* with MMU inactive, there are only physical addresses */
2014 if (!xscale->armv4_5_mmu.mmu_enabled)
2015 return xscale_write_memory(target, address, size, count, buffer);
2017 /** \todo: provide a non-stub implementation of this routine. */
2018 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2019 target_name(target), __func__);
2020 return ERROR_FAIL;
2023 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2024 uint32_t count, uint8_t *buffer)
2026 return xscale_write_memory(target, address, 4, count, buffer);
2029 static uint32_t xscale_get_ttb(struct target *target)
2031 struct xscale_common *xscale = target_to_xscale(target);
2032 uint32_t ttb;
2034 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2035 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2037 return ttb;
2040 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2041 int d_u_cache, int i_cache)
2043 struct xscale_common *xscale = target_to_xscale(target);
2044 uint32_t cp15_control;
2046 /* read cp15 control register */
2047 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2048 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2050 if (mmu)
2051 cp15_control &= ~0x1U;
2053 if (d_u_cache)
2055 /* clean DCache */
2056 xscale_send_u32(target, 0x50);
2057 xscale_send_u32(target, xscale->cache_clean_address);
2059 /* invalidate DCache */
2060 xscale_send_u32(target, 0x51);
2062 cp15_control &= ~0x4U;
2065 if (i_cache)
2067 /* invalidate ICache */
2068 xscale_send_u32(target, 0x52);
2069 cp15_control &= ~0x1000U;
2072 /* write new cp15 control register */
2073 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2075 /* execute cpwait to ensure outstanding operations complete */
2076 xscale_send_u32(target, 0x53);
2079 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2080 int d_u_cache, int i_cache)
2082 struct xscale_common *xscale = target_to_xscale(target);
2083 uint32_t cp15_control;
2085 /* read cp15 control register */
2086 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2087 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2089 if (mmu)
2090 cp15_control |= 0x1U;
2092 if (d_u_cache)
2093 cp15_control |= 0x4U;
2095 if (i_cache)
2096 cp15_control |= 0x1000U;
2098 /* write new cp15 control register */
2099 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2101 /* execute cpwait to ensure outstanding operations complete */
2102 xscale_send_u32(target, 0x53);
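/* Hardware breakpoints use the two instruction breakpoint registers,
 * IBCR0 and IBCR1; bit 0 of the value programmed there is the enable bit,
 * hence the "address | 1" below.  Software breakpoints save the original
 * instruction in breakpoint->orig_instr and overwrite it with the ARM or
 * Thumb BKPT opcode.
 */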
2105 static int xscale_set_breakpoint(struct target *target,
2106 struct breakpoint *breakpoint)
2108 int retval;
2109 struct xscale_common *xscale = target_to_xscale(target);
2111 if (target->state != TARGET_HALTED)
2113 LOG_WARNING("target not halted");
2114 return ERROR_TARGET_NOT_HALTED;
2117 if (breakpoint->set)
2119 LOG_WARNING("breakpoint already set");
2120 return ERROR_OK;
2123 if (breakpoint->type == BKPT_HARD)
2125 uint32_t value = breakpoint->address | 1;
2126 if (!xscale->ibcr0_used)
2128 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2129 xscale->ibcr0_used = 1;
2130 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2132 else if (!xscale->ibcr1_used)
2134 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2135 xscale->ibcr1_used = 1;
2136 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2138 else
2140 LOG_ERROR("BUG: no hardware comparator available");
2141 return ERROR_OK;
2144 else if (breakpoint->type == BKPT_SOFT)
2146 if (breakpoint->length == 4)
2148 /* keep the original instruction in target endianness */
2149 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2151 return retval;
2153 /* write the ARM bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2154 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2156 return retval;
2159 else
2161 /* keep the original instruction in target endianness */
2162 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2164 return retval;
2166 /* write the Thumb bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2167 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2169 return retval;
2172 breakpoint->set = 1;
2175 return ERROR_OK;
2178 static int xscale_add_breakpoint(struct target *target,
2179 struct breakpoint *breakpoint)
2181 struct xscale_common *xscale = target_to_xscale(target);
2183 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2185 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2189 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2191 LOG_INFO("only breakpoints two (Thumb) or four (ARM) bytes long are supported");
2192 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2195 if (breakpoint->type == BKPT_HARD)
2197 xscale->ibcr_available--;
2200 return ERROR_OK;
2203 static int xscale_unset_breakpoint(struct target *target,
2204 struct breakpoint *breakpoint)
2206 int retval;
2207 struct xscale_common *xscale = target_to_xscale(target);
2209 if (target->state != TARGET_HALTED)
2211 LOG_WARNING("target not halted");
2212 return ERROR_TARGET_NOT_HALTED;
2215 if (!breakpoint->set)
2217 LOG_WARNING("breakpoint not set");
2218 return ERROR_OK;
2221 if (breakpoint->type == BKPT_HARD)
2223 if (breakpoint->set == 1)
2225 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2226 xscale->ibcr0_used = 0;
2228 else if (breakpoint->set == 2)
2230 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2231 xscale->ibcr1_used = 0;
2233 breakpoint->set = 0;
2235 else
2237 /* restore original instruction (kept in target endianness) */
2238 if (breakpoint->length == 4)
2240 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2242 return retval;
2245 else
2247 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2249 return retval;
2252 breakpoint->set = 0;
2255 return ERROR_OK;
2258 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2260 struct xscale_common *xscale = target_to_xscale(target);
2262 if (target->state != TARGET_HALTED)
2264 LOG_WARNING("target not halted");
2265 return ERROR_TARGET_NOT_HALTED;
2268 if (breakpoint->set)
2270 xscale_unset_breakpoint(target, breakpoint);
2273 if (breakpoint->type == BKPT_HARD)
2274 xscale->ibcr_available++;
2276 return ERROR_OK;
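/* Watchpoints use the data breakpoint registers DBR0/DBR1 for the watched
 * address and a two-bit field per register in DBCON for the mode:
 * 0x1 = store only, 0x2 = any access, 0x3 = load only.  DBR1's field sits
 * two bits higher, which is why its enable value is shifted by 2.
 */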
2279 static int xscale_set_watchpoint(struct target *target,
2280 struct watchpoint *watchpoint)
2282 struct xscale_common *xscale = target_to_xscale(target);
2283 uint8_t enable = 0;
2284 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2285 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2287 if (target->state != TARGET_HALTED)
2289 LOG_WARNING("target not halted");
2290 return ERROR_TARGET_NOT_HALTED;
2293 xscale_get_reg(dbcon);
2295 switch (watchpoint->rw)
2297 case WPT_READ:
2298 enable = 0x3;
2299 break;
2300 case WPT_ACCESS:
2301 enable = 0x2;
2302 break;
2303 case WPT_WRITE:
2304 enable = 0x1;
2305 break;
2306 default:
2307 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2310 if (!xscale->dbr0_used)
2312 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2313 dbcon_value |= enable;
2314 xscale_set_reg_u32(dbcon, dbcon_value);
2315 watchpoint->set = 1;
2316 xscale->dbr0_used = 1;
2318 else if (!xscale->dbr1_used)
2320 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2321 dbcon_value |= enable << 2;
2322 xscale_set_reg_u32(dbcon, dbcon_value);
2323 watchpoint->set = 2;
2324 xscale->dbr1_used = 1;
2326 else
2328 LOG_ERROR("BUG: no hardware comparator available");
2329 return ERROR_OK;
2332 return ERROR_OK;
2335 static int xscale_add_watchpoint(struct target *target,
2336 struct watchpoint *watchpoint)
2338 struct xscale_common *xscale = target_to_xscale(target);
2340 if (xscale->dbr_available < 1)
2342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2345 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2347 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2350 xscale->dbr_available--;
2352 return ERROR_OK;
2355 static int xscale_unset_watchpoint(struct target *target,
2356 struct watchpoint *watchpoint)
2358 struct xscale_common *xscale = target_to_xscale(target);
2359 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2360 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2362 if (target->state != TARGET_HALTED)
2364 LOG_WARNING("target not halted");
2365 return ERROR_TARGET_NOT_HALTED;
2368 if (!watchpoint->set)
2370 LOG_WARNING("watchpoint not set");
2371 return ERROR_OK;
2374 if (watchpoint->set == 1)
2376 dbcon_value &= ~0x3;
2377 xscale_set_reg_u32(dbcon, dbcon_value);
2378 xscale->dbr0_used = 0;
2380 else if (watchpoint->set == 2)
2382 dbcon_value &= ~0xc;
2383 xscale_set_reg_u32(dbcon, dbcon_value);
2384 xscale->dbr1_used = 0;
2386 watchpoint->set = 0;
2388 return ERROR_OK;
2391 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2393 struct xscale_common *xscale = target_to_xscale(target);
2395 if (target->state != TARGET_HALTED)
2397 LOG_WARNING("target not halted");
2398 return ERROR_TARGET_NOT_HALTED;
2401 if (watchpoint->set)
2403 xscale_unset_watchpoint(target, watchpoint);
2406 xscale->dbr_available++;
2408 return ERROR_OK;
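/* Register access comes in two flavours: DCSR, TX and RX are JTAG data
 * registers and are accessed directly, while every other debug register is
 * moved through the debug handler with the CP read/write commands 0x40 and
 * 0x41, addressed by the per-register dbg_handler_number.
 */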
2411 static int xscale_get_reg(struct reg *reg)
2413 struct xscale_reg *arch_info = reg->arch_info;
2414 struct target *target = arch_info->target;
2415 struct xscale_common *xscale = target_to_xscale(target);
2417 /* DCSR, TX and RX are accessible via JTAG */
2418 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2420 return xscale_read_dcsr(arch_info->target);
2422 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2424 /* 1 = consume register content */
2425 return xscale_read_tx(arch_info->target, 1);
2427 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2429 /* can't read from RX register (host -> debug handler) */
2430 return ERROR_OK;
2432 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2434 /* can't (explicitly) read from TXRXCTRL register */
2435 return ERROR_OK;
2437 else /* Other DBG registers have to be transferred by the debug handler */
2439 /* send CP read request (command 0x40) */
2440 xscale_send_u32(target, 0x40);
2442 /* send CP register number */
2443 xscale_send_u32(target, arch_info->dbg_handler_number);
2445 /* read register value */
2446 xscale_read_tx(target, 1);
2447 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2449 reg->dirty = 0;
2450 reg->valid = 1;
2453 return ERROR_OK;
2456 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2458 struct xscale_reg *arch_info = reg->arch_info;
2459 struct target *target = arch_info->target;
2460 struct xscale_common *xscale = target_to_xscale(target);
2461 uint32_t value = buf_get_u32(buf, 0, 32);
2463 /* DCSR, TX and RX are accessible via JTAG */
2464 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2466 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2467 return xscale_write_dcsr(arch_info->target, -1, -1);
2469 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2471 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2472 return xscale_write_rx(arch_info->target);
2474 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2476 /* can't write to TX register (debug-handler -> host) */
2477 return ERROR_OK;
2479 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2481 /* can't (explicitly) write to TXRXCTRL register */
2482 return ERROR_OK;
2484 else /* Other DBG registers have to be transferred by the debug handler */
2486 /* send CP write request (command 0x41) */
2487 xscale_send_u32(target, 0x41);
2489 /* send CP register number */
2490 xscale_send_u32(target, arch_info->dbg_handler_number);
2492 /* send CP register value */
2493 xscale_send_u32(target, value);
2494 buf_set_u32(reg->value, 0, 32, value);
2497 return ERROR_OK;
2500 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2502 struct xscale_common *xscale = target_to_xscale(target);
2503 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2504 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2506 /* send CP write request (command 0x41) */
2507 xscale_send_u32(target, 0x41);
2509 /* send CP register number */
2510 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2512 /* send CP register value */
2513 xscale_send_u32(target, value);
2514 buf_set_u32(dcsr->value, 0, 32, value);
2516 return ERROR_OK;
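/* Handler command 0x61 dumps the trace unit: 256 byte-wide trace entries
 * followed by the two checkpoint registers.  Indirect branch messages are
 * preceded in the buffer by four target address bytes, so the buffer is
 * scanned backwards below to mark those bytes before they could be
 * misread as messages of their own.
 */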
2519 static int xscale_read_trace(struct target *target)
2521 struct xscale_common *xscale = target_to_xscale(target);
2522 struct arm *armv4_5 = &xscale->armv4_5_common;
2523 struct xscale_trace_data **trace_data_p;
2525 /* 258 words from debug handler
2526 * 256 trace buffer entries
2527 * 2 checkpoint addresses
2529 uint32_t trace_buffer[258];
2530 int is_address[256];
2531 int i, j;
2533 if (target->state != TARGET_HALTED)
2535 LOG_WARNING("target must be stopped to read trace data");
2536 return ERROR_TARGET_NOT_HALTED;
2539 /* send read trace buffer command (command 0x61) */
2540 xscale_send_u32(target, 0x61);
2542 /* receive trace buffer content */
2543 xscale_receive(target, trace_buffer, 258);
2545 /* parse buffer backwards to identify address entries */
2546 for (i = 255; i >= 0; i--)
2548 is_address[i] = 0;
2549 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2550 ((trace_buffer[i] & 0xf0) == 0xd0))
2552 if (i >= 3)
2553 is_address[--i] = 1;
2554 if (i >= 2)
2555 is_address[--i] = 1;
2556 if (i >= 1)
2557 is_address[--i] = 1;
2558 if (i >= 0)
2559 is_address[--i] = 1;
2564 /* search first non-zero entry */
2565 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2568 if (j == 256)
2570 LOG_DEBUG("no trace data collected");
2571 return ERROR_XSCALE_NO_TRACE_DATA;
2574 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2577 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2578 (*trace_data_p)->next = NULL;
2579 (*trace_data_p)->chkpt0 = trace_buffer[256];
2580 (*trace_data_p)->chkpt1 = trace_buffer[257];
2581 (*trace_data_p)->last_instruction =
2582 buf_get_u32(armv4_5->pc->value, 0, 32);
2583 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2584 (*trace_data_p)->depth = 256 - j;
2586 for (i = j; i < 256; i++)
2588 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2589 if (is_address[i])
2590 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2591 else
2592 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2595 return ERROR_OK;
2598 static int xscale_read_instruction(struct target *target,
2599 struct arm_instruction *instruction)
2601 struct xscale_common *xscale = target_to_xscale(target);
2602 int i;
2603 int section = -1;
2604 size_t size_read;
2605 uint32_t opcode;
2606 int retval;
2608 if (!xscale->trace.image)
2609 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2611 /* search for the section the current instruction belongs to */
2612 for (i = 0; i < xscale->trace.image->num_sections; i++)
2614 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2615 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2617 section = i;
2618 break;
2622 if (section == -1)
2624 /* current instruction couldn't be found in the image */
2625 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2628 if (xscale->trace.core_state == ARM_STATE_ARM)
2630 uint8_t buf[4];
2631 if ((retval = image_read_section(xscale->trace.image, section,
2632 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2633 4, buf, &size_read)) != ERROR_OK)
2635 LOG_ERROR("error while reading instruction: %i", retval);
2636 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2638 opcode = target_buffer_get_u32(target, buf);
2639 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2641 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2643 uint8_t buf[2];
2644 if ((retval = image_read_section(xscale->trace.image, section,
2645 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2646 2, buf, &size_read)) != ERROR_OK)
2648 LOG_ERROR("error while reading instruction: %i", retval);
2649 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2651 opcode = target_buffer_get_u16(target, buf);
2652 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2654 else
2656 LOG_ERROR("BUG: unknown core state encountered");
2657 exit(-1);
2660 return ERROR_OK;
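/* Reassemble the branch target from the four address bytes that precede
 * an indirect branch message; the entry closest to the message (i-1)
 * carries the least significant byte.
 */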
2663 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2664 int i, uint32_t *target)
2666 /* if there are fewer than four entries prior to the indirect branch message
2667 * we can't extract the address */
2668 if (i < 4)
2670 return -1;
2673 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2674 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2676 return 0;
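/* Each trace entry is one byte: the high nibble identifies the message
 * (0-7 exception vectors, 8 direct branch, 9 indirect branch, 12/13 their
 * checkpointed variants, 15 roll-over) and the low nibble counts the
 * instructions executed since the previous message, which is how the loop
 * below advances current_pc between control-flow changes.
 */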
2679 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2681 struct xscale_common *xscale = target_to_xscale(target);
2682 int next_pc_ok = 0;
2683 uint32_t next_pc = 0x0;
2684 struct xscale_trace_data *trace_data = xscale->trace.data;
2685 int retval;
2687 while (trace_data)
2689 int i, chkpt;
2690 int rollover;
2691 int branch;
2692 int exception;
2693 xscale->trace.core_state = ARM_STATE_ARM;
2695 chkpt = 0;
2696 rollover = 0;
2698 for (i = 0; i < trace_data->depth; i++)
2700 next_pc_ok = 0;
2701 branch = 0;
2702 exception = 0;
2704 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2705 continue;
2707 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2709 case 0: /* Exceptions */
2710 case 1:
2711 case 2:
2712 case 3:
2713 case 4:
2714 case 5:
2715 case 6:
2716 case 7:
2717 exception = (trace_data->entries[i].data & 0x70) >> 4;
2718 next_pc_ok = 1;
2719 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2720 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2721 break;
2722 case 8: /* Direct Branch */
2723 branch = 1;
2724 break;
2725 case 9: /* Indirect Branch */
2726 branch = 1;
2727 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2729 next_pc_ok = 1;
2731 break;
2732 case 13: /* Checkpointed Indirect Branch */
2733 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2735 next_pc_ok = 1;
2736 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2737 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2738 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2740 /* explicit fall-through */
2741 case 12: /* Checkpointed Direct Branch */
2742 branch = 1;
2743 if (chkpt == 0)
2745 next_pc_ok = 1;
2746 next_pc = trace_data->chkpt0;
2747 chkpt++;
2749 else if (chkpt == 1)
2751 next_pc_ok = 1;
2752 next_pc = trace_data->chkpt1;
2753 chkpt++;
2755 else
2757 LOG_WARNING("more than two checkpointed branches encountered");
2759 break;
2760 case 15: /* Roll-over */
2761 rollover++;
2762 continue;
2763 default: /* Reserved */
2764 command_print(cmd_ctx, "--- reserved trace message ---");
2765 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2766 return ERROR_OK;
2769 if (xscale->trace.pc_ok)
2771 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2772 struct arm_instruction instruction;
2774 if ((exception == 6) || (exception == 7))
2776 /* IRQ or FIQ exception, no instruction executed */
2777 executed -= 1;
2780 while (executed-- >= 0)
2782 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2784 /* can't continue tracing with no image available */
2785 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2787 return retval;
2789 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2791 /* TODO: handle incomplete images */
2795 /* a precise abort on a load to the PC is included in the incremental
2796 * word count, other instructions causing data aborts are not included
2798 if ((executed == 0) && (exception == 4)
2799 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2801 if ((instruction.type == ARM_LDM)
2802 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2804 executed--;
2806 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2807 && (instruction.info.load_store.Rd != 15))
2809 executed--;
2813 /* only the last instruction executed
2814 * (the one that caused the control flow change)
2815 * could be a taken branch
2817 if (((executed == -1) && (branch == 1)) &&
2818 (((instruction.type == ARM_B) ||
2819 (instruction.type == ARM_BL) ||
2820 (instruction.type == ARM_BLX)) &&
2821 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2823 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2825 else
2827 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2829 command_print(cmd_ctx, "%s", instruction.text);
2832 rollover = 0;
2835 if (next_pc_ok)
2837 xscale->trace.current_pc = next_pc;
2838 xscale->trace.pc_ok = 1;
2842 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2844 struct arm_instruction instruction;
2845 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2847 /* can't continue tracing with no image available */
2848 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2850 return retval;
2852 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2854 /* TODO: handle incomplete images */
2857 command_print(cmd_ctx, "%s", instruction.text);
2860 trace_data = trace_data->next;
2863 return ERROR_OK;
2866 static const struct reg_arch_type xscale_reg_type = {
2867 .get = xscale_get_reg,
2868 .set = xscale_set_reg,
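/* The XScale debug registers are appended as a second reg_cache behind the
 * standard ARM core cache, so both register sets stay visible; each entry
 * gets its own copy of xscale_reg_arch_info with the owning target filled
 * in.
 */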
2871 static void xscale_build_reg_cache(struct target *target)
2873 struct xscale_common *xscale = target_to_xscale(target);
2874 struct arm *armv4_5 = &xscale->armv4_5_common;
2875 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2876 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2877 int i;
2878 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2880 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2882 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2883 cache_p = &(*cache_p)->next;
2885 /* fill in values for the xscale reg cache */
2886 (*cache_p)->name = "XScale registers";
2887 (*cache_p)->next = NULL;
2888 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2889 (*cache_p)->num_regs = num_regs;
2891 for (i = 0; i < num_regs; i++)
2893 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2894 (*cache_p)->reg_list[i].value = calloc(4, 1);
2895 (*cache_p)->reg_list[i].dirty = 0;
2896 (*cache_p)->reg_list[i].valid = 0;
2897 (*cache_p)->reg_list[i].size = 32;
2898 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2899 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2900 arch_info[i] = xscale_reg_arch_info[i];
2901 arch_info[i].target = target;
2904 xscale->reg_cache = (*cache_p);
2907 static int xscale_init_target(struct command_context *cmd_ctx,
2908 struct target *target)
2910 xscale_build_reg_cache(target);
2911 return ERROR_OK;
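/* Besides bookkeeping, the init code below prepares the exception vector
 * images kept in low_vectors[]/high_vectors[]: entry 0 branches into the
 * debug handler at handler_address + 0x20, while entries 1-7 default to
 * branch-to-self instructions (ARMV4_5_B(0xfffffe, 0)); user-supplied
 * replacements live separately in static_low_vectors[]/static_high_vectors[]
 * (see the "xscale vector_table" command).
 */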
2914 static int xscale_init_arch_info(struct target *target,
2915 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2917 struct arm *armv4_5;
2918 uint32_t high_reset_branch, low_reset_branch;
2919 int i;
2921 armv4_5 = &xscale->armv4_5_common;
2923 /* store architecture specific data */
2924 xscale->common_magic = XSCALE_COMMON_MAGIC;
2926 /* we don't really *need* a variant param ... */
2927 if (variant) {
2928 int ir_length = 0;
2930 if (strcmp(variant, "pxa250") == 0
2931 || strcmp(variant, "pxa255") == 0
2932 || strcmp(variant, "pxa26x") == 0)
2933 ir_length = 5;
2934 else if (strcmp(variant, "pxa27x") == 0
2935 || strcmp(variant, "ixp42x") == 0
2936 || strcmp(variant, "ixp45x") == 0
2937 || strcmp(variant, "ixp46x") == 0)
2938 ir_length = 7;
2939 else if (strcmp(variant, "pxa3xx") == 0)
2940 ir_length = 11;
2941 else
2942 LOG_WARNING("%s: unrecognized variant %s",
2943 tap->dotted_name, variant);
2945 if (ir_length && ir_length != tap->ir_length) {
2946 LOG_WARNING("%s: IR length for %s is %d; fixing",
2947 tap->dotted_name, variant, ir_length);
2948 tap->ir_length = ir_length;
2952 /* PXA3xx shifts the JTAG instructions */
2953 if (tap->ir_length == 11)
2954 xscale->xscale_variant = XSCALE_PXA3XX;
2955 else
2956 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2958 /* the debug handler isn't installed (and thus not running) at this time */
2959 xscale->handler_address = 0xfe000800;
2961 /* clear the vectors we keep locally for reference */
2962 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2963 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2965 /* no user-specified vectors have been configured yet */
2966 xscale->static_low_vectors_set = 0x0;
2967 xscale->static_high_vectors_set = 0x0;
2969 /* calculate branches to debug handler */
2970 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2971 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2973 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2974 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2976 for (i = 1; i <= 7; i++)
2978 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2979 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2982 /* 64kB aligned region used for DCache cleaning */
2983 xscale->cache_clean_address = 0xfffe0000;
2985 xscale->hold_rst = 0;
2986 xscale->external_debug_break = 0;
2988 xscale->ibcr_available = 2;
2989 xscale->ibcr0_used = 0;
2990 xscale->ibcr1_used = 0;
2992 xscale->dbr_available = 2;
2993 xscale->dbr0_used = 0;
2994 xscale->dbr1_used = 0;
2996 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2997 target_name(target));
2999 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3000 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3002 xscale->vector_catch = 0x1;
3004 xscale->trace.capture_status = TRACE_IDLE;
3005 xscale->trace.data = NULL;
3006 xscale->trace.image = NULL;
3007 xscale->trace.buffer_enabled = 0;
3008 xscale->trace.buffer_fill = 0;
3010 /* prepare ARMv4/5 specific information */
3011 armv4_5->arch_info = xscale;
3012 armv4_5->read_core_reg = xscale_read_core_reg;
3013 armv4_5->write_core_reg = xscale_write_core_reg;
3014 armv4_5->full_context = xscale_full_context;
3016 arm_init_arch_info(target, armv4_5);
3018 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3019 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3020 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3021 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3022 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3023 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3024 xscale->armv4_5_mmu.has_tiny_pages = 1;
3025 xscale->armv4_5_mmu.mmu_enabled = 0;
3027 return ERROR_OK;
3030 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3032 struct xscale_common *xscale;
3034 if (sizeof xscale_debug_handler - 1 > 0x800) {
3035 LOG_ERROR("debug_handler.bin: larger than 2KiB");
3036 return ERROR_FAIL;
3039 xscale = calloc(1, sizeof(*xscale));
3040 if (!xscale)
3041 return ERROR_FAIL;
3043 return xscale_init_arch_info(target, xscale, target->tap,
3044 target->variant);
3047 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3049 struct target *target = NULL;
3050 struct xscale_common *xscale;
3051 int retval;
3052 uint32_t handler_address;
3054 if (CMD_ARGC < 2)
3056 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3057 return ERROR_OK;
3060 if ((target = get_target(CMD_ARGV[0])) == NULL)
3062 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3063 return ERROR_FAIL;
3066 xscale = target_to_xscale(target);
3067 retval = xscale_verify_pointer(CMD_CTX, xscale);
3068 if (retval != ERROR_OK)
3069 return retval;
3071 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3073 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3074 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3076 xscale->handler_address = handler_address;
3078 else
3080 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3081 return ERROR_FAIL;
3084 return ERROR_OK;
3087 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3089 struct target *target = NULL;
3090 struct xscale_common *xscale;
3091 int retval;
3092 uint32_t cache_clean_address;
3094 if (CMD_ARGC < 2)
3096 return ERROR_COMMAND_SYNTAX_ERROR;
3099 target = get_target(CMD_ARGV[0]);
3100 if (target == NULL)
3102 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3103 return ERROR_FAIL;
3105 xscale = target_to_xscale(target);
3106 retval = xscale_verify_pointer(CMD_CTX, xscale);
3107 if (retval != ERROR_OK)
3108 return retval;
3110 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3112 if (cache_clean_address & 0xffff)
3114 LOG_ERROR("xscale cache_clean_address <address> must be 64KiB aligned");
3116 else
3118 xscale->cache_clean_address = cache_clean_address;
3121 return ERROR_OK;
3124 COMMAND_HANDLER(xscale_handle_cache_info_command)
3126 struct target *target = get_current_target(CMD_CTX);
3127 struct xscale_common *xscale = target_to_xscale(target);
3128 int retval;
3130 retval = xscale_verify_pointer(CMD_CTX, xscale);
3131 if (retval != ERROR_OK)
3132 return retval;
3134 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
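/* xscale_virt2phys() defers to the generic armv4_5_mmu_translate_va()
 * helper, which relies on the get_ttb/read_memory callbacks installed by
 * xscale_init_arch_info(); a descriptor type of -1 means the translation
 * failed and the helper's error code is returned as-is.
 */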
3137 static int xscale_virt2phys(struct target *target,
3138 uint32_t virtual, uint32_t *physical)
3140 struct xscale_common *xscale = target_to_xscale(target);
3141 int type;
3142 uint32_t cb;
3143 int domain;
3144 uint32_t ap;
3146 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3147 LOG_ERROR(xscale_not);
3148 return ERROR_TARGET_INVALID;
3151 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3152 if (type == -1)
3154 return ret;
3156 *physical = ret;
3157 return ERROR_OK;
3160 static int xscale_mmu(struct target *target, int *enabled)
3162 struct xscale_common *xscale = target_to_xscale(target);
3164 if (target->state != TARGET_HALTED)
3166 LOG_ERROR("Target not halted");
3167 return ERROR_TARGET_INVALID;
3169 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3170 return ERROR_OK;
3173 COMMAND_HANDLER(xscale_handle_mmu_command)
3175 struct target *target = get_current_target(CMD_CTX);
3176 struct xscale_common *xscale = target_to_xscale(target);
3177 int retval;
3179 retval = xscale_verify_pointer(CMD_CTX, xscale);
3180 if (retval != ERROR_OK)
3181 return retval;
3183 if (target->state != TARGET_HALTED)
3185 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3186 return ERROR_OK;
3189 if (CMD_ARGC >= 1)
3191 bool enable;
3192 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3193 if (enable)
3194 xscale_enable_mmu_caches(target, 1, 0, 0);
3195 else
3196 xscale_disable_mmu_caches(target, 1, 0, 0);
3197 xscale->armv4_5_mmu.mmu_enabled = enable;
3200 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3202 return ERROR_OK;
3205 COMMAND_HANDLER(xscale_handle_idcache_command)
3207 struct target *target = get_current_target(CMD_CTX);
3208 struct xscale_common *xscale = target_to_xscale(target);
3210 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3211 if (retval != ERROR_OK)
3212 return retval;
3214 if (target->state != TARGET_HALTED)
3216 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3217 return ERROR_OK;
3220 bool icache;
3221 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3223 if (CMD_ARGC >= 1)
3225 bool enable;
3226 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3227 if (enable)
3228 xscale_enable_mmu_caches(target, 0, !icache, icache);
3229 else
3230 xscale_disable_mmu_caches(target, 0, !icache, icache);
3231 if (icache)
3232 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3233 else
3234 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3237 bool enabled = icache ?
3238 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3239 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3240 const char *msg = enabled ? "enabled" : "disabled";
3241 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3243 return ERROR_OK;
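/* The vector_catch mask is mirrored into DCSR bits 16-23, the per-vector
 * trap bits, one bit per exception vector, so taking a trapped exception
 * forces the core into debug state.
 */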
3246 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3248 struct target *target = get_current_target(CMD_CTX);
3249 struct xscale_common *xscale = target_to_xscale(target);
3250 int retval;
3252 retval = xscale_verify_pointer(CMD_CTX, xscale);
3253 if (retval != ERROR_OK)
3254 return retval;
3256 if (CMD_ARGC < 1)
3258 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3260 else
3262 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3263 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3264 xscale_write_dcsr(target, -1, -1);
3267 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3269 return ERROR_OK;
3273 COMMAND_HANDLER(xscale_handle_vector_table_command)
3275 struct target *target = get_current_target(CMD_CTX);
3276 struct xscale_common *xscale = target_to_xscale(target);
3277 int err = 0;
3278 int retval;
3280 retval = xscale_verify_pointer(CMD_CTX, xscale);
3281 if (retval != ERROR_OK)
3282 return retval;
3284 if (CMD_ARGC == 0) /* print current settings */
3286 int idx;
3288 command_print(CMD_CTX, "active user-set static vectors:");
3289 for (idx = 1; idx < 8; idx++)
3290 if (xscale->static_low_vectors_set & (1 << idx))
3291 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3292 for (idx = 1; idx < 8; idx++)
3293 if (xscale->static_high_vectors_set & (1 << idx))
3294 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3295 return ERROR_OK;
3298 if (CMD_ARGC != 3)
3299 err = 1;
3300 else
3302 int idx;
3303 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3304 uint32_t vec;
3305 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3307 if (idx < 1 || idx >= 8)
3308 err = 1;
3310 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3312 xscale->static_low_vectors_set |= (1<<idx);
3313 xscale->static_low_vectors[idx] = vec;
3315 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3317 xscale->static_high_vectors_set |= (1<<idx);
3318 xscale->static_high_vectors[idx] = vec;
3320 else
3321 err = 1;
3324 if (err)
3325 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3327 return ERROR_OK;
3331 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3333 struct target *target = get_current_target(CMD_CTX);
3334 struct xscale_common *xscale = target_to_xscale(target);
3335 struct arm *armv4_5 = &xscale->armv4_5_common;
3336 uint32_t dcsr_value;
3337 int retval;
3339 retval = xscale_verify_pointer(CMD_CTX, xscale);
3340 if (retval != ERROR_OK)
3341 return retval;
3343 if (target->state != TARGET_HALTED)
3345 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3346 return ERROR_OK;
3349 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3351 struct xscale_trace_data *td, *next_td;
3352 xscale->trace.buffer_enabled = 1;
3354 /* free old trace data */
3355 td = xscale->trace.data;
3356 while (td)
3358 next_td = td->next;
3360 if (td->entries)
3361 free(td->entries);
3362 free(td);
3363 td = next_td;
3365 xscale->trace.data = NULL;
3367 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3369 xscale->trace.buffer_enabled = 0;
3372 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3374 uint32_t fill = 1;
3375 if (CMD_ARGC >= 3)
3376 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3377 xscale->trace.buffer_fill = fill;
3379 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3381 xscale->trace.buffer_fill = -1;
3384 if (xscale->trace.buffer_enabled)
3386 /* if we enable the trace buffer in fill-once
3387 * mode we know the address of the first instruction */
3388 xscale->trace.pc_ok = 1;
3389 xscale->trace.current_pc =
3390 buf_get_u32(armv4_5->pc->value, 0, 32);
3392 else
3394 /* otherwise the address is unknown, and we have no known good PC */
3395 xscale->trace.pc_ok = 0;
3398 command_print(CMD_CTX, "trace buffer %s (%s)",
3399 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3400 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
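/* DCSR bit 1 selects the trace mode (1 = fill-once, 0 = wrap-around) and
 * bit 0 is the trace buffer enable itself.  Only the mode is programmed
 * here; the enable bit is cleared and is expected to be set again by the
 * resume path once tracing is armed.
 */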
3402 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3403 if (xscale->trace.buffer_fill >= 0)
3404 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3405 else
3406 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3408 return ERROR_OK;
3411 COMMAND_HANDLER(xscale_handle_trace_image_command)
3413 struct target *target = get_current_target(CMD_CTX);
3414 struct xscale_common *xscale = target_to_xscale(target);
3415 int retval;
3417 if (CMD_ARGC < 1)
3419 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3420 return ERROR_OK;
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3427 if (xscale->trace.image)
3429 image_close(xscale->trace.image);
3430 free(xscale->trace.image);
3431 command_print(CMD_CTX, "previously loaded image found and closed");
3434 xscale->trace.image = malloc(sizeof(struct image));
3435 xscale->trace.image->base_address_set = 0;
3436 xscale->trace.image->start_address_set = 0;
3438 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3439 if (CMD_ARGC >= 2)
3441 xscale->trace.image->base_address_set = 1;
3442 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3444 else
3446 xscale->trace.image->base_address_set = 0;
3449 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3451 free(xscale->trace.image);
3452 xscale->trace.image = NULL;
3453 return ERROR_OK;
3456 return ERROR_OK;
3459 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3461 struct target *target = get_current_target(CMD_CTX);
3462 struct xscale_common *xscale = target_to_xscale(target);
3463 struct xscale_trace_data *trace_data;
3464 struct fileio file;
3465 int retval;
3467 retval = xscale_verify_pointer(CMD_CTX, xscale);
3468 if (retval != ERROR_OK)
3469 return retval;
3471 if (target->state != TARGET_HALTED)
3473 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3474 return ERROR_OK;
3477 if (CMD_ARGC < 1)
3479 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3480 return ERROR_OK;
3483 trace_data = xscale->trace.data;
3485 if (!trace_data)
3487 command_print(CMD_CTX, "no trace data collected");
3488 return ERROR_OK;
3491 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3493 return ERROR_OK;
3496 while (trace_data)
3498 int i;
3500 fileio_write_u32(&file, trace_data->chkpt0);
3501 fileio_write_u32(&file, trace_data->chkpt1);
3502 fileio_write_u32(&file, trace_data->last_instruction);
3503 fileio_write_u32(&file, trace_data->depth);
3505 for (i = 0; i < trace_data->depth; i++)
3506 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3508 trace_data = trace_data->next;
3511 fileio_close(&file);
3513 return ERROR_OK;
3516 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3518 struct target *target = get_current_target(CMD_CTX);
3519 struct xscale_common *xscale = target_to_xscale(target);
3520 int retval;
3522 retval = xscale_verify_pointer(CMD_CTX, xscale);
3523 if (retval != ERROR_OK)
3524 return retval;
3526 xscale_analyze_trace(target, CMD_CTX);
3528 return ERROR_OK;
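/* The cp15 command maps architectural CP15 register numbers onto the
 * cached XSCALE_* register indices for reads; writes bypass the register
 * cache and go straight to the debug handler (command 0x41), followed by
 * CPWAIT (0x53) so the side effects are visible immediately.
 */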
3531 COMMAND_HANDLER(xscale_handle_cp15)
3533 struct target *target = get_current_target(CMD_CTX);
3534 struct xscale_common *xscale = target_to_xscale(target);
3535 int retval;
3537 retval = xscale_verify_pointer(CMD_CTX, xscale);
3538 if (retval != ERROR_OK)
3539 return retval;
3541 if (target->state != TARGET_HALTED)
3543 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3544 return ERROR_OK;
3546 uint32_t reg_no = 0;
3547 struct reg *reg = NULL;
3548 if (CMD_ARGC > 0)
3550 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3551 /*translate from xscale cp15 register no to openocd register*/
3552 switch (reg_no)
3554 case 0:
3555 reg_no = XSCALE_MAINID;
3556 break;
3557 case 1:
3558 reg_no = XSCALE_CTRL;
3559 break;
3560 case 2:
3561 reg_no = XSCALE_TTB;
3562 break;
3563 case 3:
3564 reg_no = XSCALE_DAC;
3565 break;
3566 case 5:
3567 reg_no = XSCALE_FSR;
3568 break;
3569 case 6:
3570 reg_no = XSCALE_FAR;
3571 break;
3572 case 13:
3573 reg_no = XSCALE_PID;
3574 break;
3575 case 15:
3576 reg_no = XSCALE_CPACCESS;
3577 break;
3578 default:
3579 command_print(CMD_CTX, "invalid register number");
3580 return ERROR_INVALID_ARGUMENTS;
3582 reg = &xscale->reg_cache->reg_list[reg_no];
3585 if (CMD_ARGC == 1)
3587 uint32_t value;
3589 /* read cp15 control register */
3590 xscale_get_reg(reg);
3591 value = buf_get_u32(reg->value, 0, 32);
3592 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3594 else if (CMD_ARGC == 2)
3596 uint32_t value;
3597 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3599 /* send CP write request (command 0x41) */
3600 xscale_send_u32(target, 0x41);
3602 /* send CP register number */
3603 xscale_send_u32(target, reg_no);
3605 /* send CP register value */
3606 xscale_send_u32(target, value);
3608 /* execute cpwait to ensure outstanding operations complete */
3609 xscale_send_u32(target, 0x53);
3611 else
3613 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3616 return ERROR_OK;
3619 static const struct command_registration xscale_exec_command_handlers[] = {
3621 .name = "cache_info",
3622 .handler = xscale_handle_cache_info_command,
3623 .mode = COMMAND_EXEC,
3624 .help = "display information about CPU caches",
3627 .name = "mmu",
3628 .handler = xscale_handle_mmu_command,
3629 .mode = COMMAND_EXEC,
3630 .help = "enable or disable the MMU",
3631 .usage = "['enable'|'disable']",
3634 .name = "icache",
3635 .handler = xscale_handle_idcache_command,
3636 .mode = COMMAND_EXEC,
3637 .help = "display ICache state, optionally enabling or "
3638 "disabling it",
3639 .usage = "['enable'|'disable']",
3642 .name = "dcache",
3643 .handler = xscale_handle_idcache_command,
3644 .mode = COMMAND_EXEC,
3645 .help = "display DCache state, optionally enabling or "
3646 "disabling it",
3647 .usage = "['enable'|'disable']",
3650 .name = "vector_catch",
3651 .handler = xscale_handle_vector_catch_command,
3652 .mode = COMMAND_EXEC,
3653 .help = "set or display 8-bit mask of vectors "
3654 "that should trigger debug entry",
3655 .usage = "[mask]",
3658 .name = "vector_table",
3659 .handler = xscale_handle_vector_table_command,
3660 .mode = COMMAND_EXEC,
3661 .help = "set vector table entry in mini-ICache, "
3662 "or display current tables",
3663 .usage = "[('high'|'low') index code]",
3666 .name = "trace_buffer",
3667 .handler = xscale_handle_trace_buffer_command,
3668 .mode = COMMAND_EXEC,
3669 .help = "display trace buffer status, enable or disable "
3670 "tracing, and optionally reconfigure trace mode",
3671 .usage = "['enable'|'disable' ['fill' number|'wrap']]",
3674 .name = "dump_trace",
3675 .handler = xscale_handle_dump_trace_command,
3676 .mode = COMMAND_EXEC,
3677 .help = "dump content of trace buffer to file",
3678 .usage = "filename",
3681 .name = "analyze_trace",
3682 .handler = xscale_handle_analyze_trace_buffer_command,
3683 .mode = COMMAND_EXEC,
3684 .help = "analyze content of trace buffer",
3685 .usage = "",
3688 .name = "trace_image",
3689 .handler = xscale_handle_trace_image_command,
3690 .mode = COMMAND_EXEC,
3691 .help = "load image from file to address (default 0)",
3692 .usage = "filename [offset [filetype]]",
3695 .name = "cp15",
3696 .handler = xscale_handle_cp15,
3697 .mode = COMMAND_EXEC,
3698 .help = "Read or write coprocessor 15 register.",
3699 .usage = "register [value]",
3701 COMMAND_REGISTRATION_DONE
3703 static const struct command_registration xscale_any_command_handlers[] = {
3705 .name = "debug_handler",
3706 .handler = xscale_handle_debug_handler_command,
3707 .mode = COMMAND_ANY,
3708 .help = "Change address used for debug handler.",
3709 .usage = "target address",
3712 .name = "cache_clean_address",
3713 .handler = xscale_handle_cache_clean_address_command,
3714 .mode = COMMAND_ANY,
3715 .help = "Change address used for cleaning data cache.",
3716 .usage = "address",
3719 .chain = xscale_exec_command_handlers,
3721 COMMAND_REGISTRATION_DONE
3723 static const struct command_registration xscale_command_handlers[] = {
3725 .chain = arm_command_handlers,
3728 .name = "xscale",
3729 .mode = COMMAND_ANY,
3730 .help = "xscale command group",
3731 .chain = xscale_any_command_handlers,
3733 COMMAND_REGISTRATION_DONE
3736 struct target_type xscale_target =
3738 .name = "xscale",
3740 .poll = xscale_poll,
3741 .arch_state = xscale_arch_state,
3743 .target_request_data = NULL,
3745 .halt = xscale_halt,
3746 .resume = xscale_resume,
3747 .step = xscale_step,
3749 .assert_reset = xscale_assert_reset,
3750 .deassert_reset = xscale_deassert_reset,
3751 .soft_reset_halt = NULL,
3753 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3754 .get_gdb_reg_list = arm_get_gdb_reg_list,
3756 .read_memory = xscale_read_memory,
3757 .read_phys_memory = xscale_read_phys_memory,
3758 .write_memory = xscale_write_memory,
3759 .write_phys_memory = xscale_write_phys_memory,
3760 .bulk_write_memory = xscale_bulk_write_memory,
3762 .checksum_memory = arm_checksum_memory,
3763 .blank_check_memory = arm_blank_check_memory,
3765 .run_algorithm = armv4_5_run_algorithm,
3767 .add_breakpoint = xscale_add_breakpoint,
3768 .remove_breakpoint = xscale_remove_breakpoint,
3769 .add_watchpoint = xscale_add_watchpoint,
3770 .remove_watchpoint = xscale_remove_watchpoint,
3772 .commands = xscale_command_handlers,
3773 .target_create = xscale_target_create,
3774 .init_target = xscale_init_target,
3776 .virt2phys = xscale_virt2phys,
3777 .mmu = xscale_mmu