XScale: initial PXA3xx support
src/target/xscale.c (openocd/ellerodev.git)
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
42 * Important XScale documents available as of October 2009 include:
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
57 * Chip-specific microarchitecture documents may also be useful.
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
85 static char *const xscale_reg_list[] =
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
111 static const struct xscale_reg xscale_reg_arch_info[] =
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 uint8_t buf[4];
142 buf_set_u32(buf, 0, 32, value);
144 return xscale_set_reg(reg, buf);
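
/* A minimal usage sketch (not compiled), mirroring how this wrapper is used
 * further below in xscale_enable_single_step()/xscale_set_breakpoint();
 * the helper name is illustrative only.
 */
#if 0
static int example_arm_ibcr0(struct target *target, uint32_t address)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* bit 0 enables the comparator, the upper bits hold the address */
	return xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0],
			address | 0x1);
}
#endif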
147 static const char xscale_not[] = "target is not an XScale";
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
156 return ERROR_OK;
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
161 if (tap == NULL)
162 return ERROR_FAIL;
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 struct scan_field field;
167 uint8_t scratch[4];
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 return ERROR_OK;
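
/* Note on the "<< xscale->xscale_variant" shifts used throughout this file:
 * the debug TAP instructions (XSCALE_SELDCSR, XSCALE_DBGTX, XSCALE_DBGRX,
 * XSCALE_LDIC) are shifted by a per-variant amount before being scanned in,
 * presumably so the same code can drive parts (such as the PXA3xx named in
 * the commit title) whose instruction register places these opcodes at a
 * different bit position.  Illustrative pattern only:
 *
 *	xscale_jtag_set_instr(target->tap,
 *			XSCALE_DBGTX << xscale->xscale_variant);
 */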
181 static int xscale_read_dcsr(struct target *target)
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap,
195 XSCALE_SELDCSR << xscale->xscale_variant);
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
200 memset(&fields, 0, sizeof fields);
202 fields[0].tap = target->tap;
203 fields[0].num_bits = 3;
204 fields[0].out_value = &field0;
205 uint8_t tmp;
206 fields[0].in_value = &tmp;
208 fields[1].tap = target->tap;
209 fields[1].num_bits = 32;
210 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
212 fields[2].tap = target->tap;
213 fields[2].num_bits = 1;
214 fields[2].out_value = &field2;
215 uint8_t tmp2;
216 fields[2].in_value = &tmp2;
218 jtag_add_dr_scan(3, fields, jtag_get_end_state());
220 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
221 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
223 if ((retval = jtag_execute_queue()) != ERROR_OK)
225 LOG_ERROR("JTAG error while reading DCSR");
226 return retval;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
230 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
232 /* write the register with the value we just read
233 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
235 field0_check_mask = 0x1;
236 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
237 fields[1].in_value = NULL;
239 jtag_set_end_state(TAP_IDLE);
241 jtag_add_dr_scan(3, fields, jtag_get_end_state());
243 /* DANGER!!! this must be here. It will make sure that the arguments
244 * to jtag_set_check_value() do not go out of scope! */
245 return jtag_execute_queue();
249 static void xscale_getbuf(jtag_callback_data_t arg)
251 uint8_t *in = (uint8_t *)arg;
252 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
255 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
257 if (num_words == 0)
258 return ERROR_INVALID_ARGUMENTS;
260 struct xscale_common *xscale = target_to_xscale(target);
261 int retval = ERROR_OK;
262 tap_state_t path[3];
263 struct scan_field fields[3];
264 uint8_t *field0 = malloc(num_words * 1);
265 uint8_t field0_check_value = 0x2;
266 uint8_t field0_check_mask = 0x6;
267 uint32_t *field1 = malloc(num_words * 4);
268 uint8_t field2_check_value = 0x0;
269 uint8_t field2_check_mask = 0x1;
270 int words_done = 0;
271 int words_scheduled = 0;
272 int i;
274 path[0] = TAP_DRSELECT;
275 path[1] = TAP_DRCAPTURE;
276 path[2] = TAP_DRSHIFT;
278 memset(&fields, 0, sizeof fields);
280 fields[0].tap = target->tap;
281 fields[0].num_bits = 3;
282 fields[0].check_value = &field0_check_value;
283 fields[0].check_mask = &field0_check_mask;
285 fields[1].tap = target->tap;
286 fields[1].num_bits = 32;
288 fields[2].tap = target->tap;
289 fields[2].num_bits = 1;
290 fields[2].check_value = &field2_check_value;
291 fields[2].check_mask = &field2_check_mask;
293 jtag_set_end_state(TAP_IDLE);
294 xscale_jtag_set_instr(target->tap,
295 XSCALE_DBGTX << xscale->xscale_variant);
296 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
298 /* repeat until all words have been collected */
299 int attempts = 0;
300 while (words_done < num_words)
302 /* schedule reads */
303 words_scheduled = 0;
304 for (i = words_done; i < num_words; i++)
306 fields[0].in_value = &field0[i];
308 jtag_add_pathmove(3, path);
310 fields[1].in_value = (uint8_t *)(field1 + i);
312 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
314 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
316 words_scheduled++;
319 if ((retval = jtag_execute_queue()) != ERROR_OK)
321 LOG_ERROR("JTAG error while receiving data from debug handler");
322 break;
325 /* examine results */
326 for (i = words_done; i < num_words; i++)
328 if (!(field0[0] & 1))
330 /* move backwards if necessary */
331 int j;
332 for (j = i; j < num_words - 1; j++)
334 field0[j] = field0[j + 1];
335 field1[j] = field1[j + 1];
337 words_scheduled--;
340 if (words_scheduled == 0)
342 if (attempts++==1000)
344 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
345 retval = ERROR_TARGET_TIMEOUT;
346 break;
350 words_done += words_scheduled;
353 for (i = 0; i < num_words; i++)
354 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
356 free(field1);
358 return retval;
361 static int xscale_read_tx(struct target *target, int consume)
363 struct xscale_common *xscale = target_to_xscale(target);
364 tap_state_t path[3];
365 tap_state_t noconsume_path[6];
366 int retval;
367 struct timeval timeout, now;
368 struct scan_field fields[3];
369 uint8_t field0_in = 0x0;
370 uint8_t field0_check_value = 0x2;
371 uint8_t field0_check_mask = 0x6;
372 uint8_t field2_check_value = 0x0;
373 uint8_t field2_check_mask = 0x1;
375 jtag_set_end_state(TAP_IDLE);
377 xscale_jtag_set_instr(target->tap,
378 XSCALE_DBGTX << xscale->xscale_variant);
380 path[0] = TAP_DRSELECT;
381 path[1] = TAP_DRCAPTURE;
382 path[2] = TAP_DRSHIFT;
384 noconsume_path[0] = TAP_DRSELECT;
385 noconsume_path[1] = TAP_DRCAPTURE;
386 noconsume_path[2] = TAP_DREXIT1;
387 noconsume_path[3] = TAP_DRPAUSE;
388 noconsume_path[4] = TAP_DREXIT2;
389 noconsume_path[5] = TAP_DRSHIFT;
391 memset(&fields, 0, sizeof fields);
393 fields[0].tap = target->tap;
394 fields[0].num_bits = 3;
395 fields[0].in_value = &field0_in;
397 fields[1].tap = target->tap;
398 fields[1].num_bits = 32;
399 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
401 fields[2].tap = target->tap;
402 fields[2].num_bits = 1;
403 uint8_t tmp;
404 fields[2].in_value = &tmp;
406 gettimeofday(&timeout, NULL);
407 timeval_add_time(&timeout, 1, 0);
409 for (;;)
411 /* if we want to consume the register content (i.e. clear TX_READY),
412 * we have to go straight from Capture-DR to Shift-DR;
413 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
415 if (consume)
416 jtag_add_pathmove(3, path);
417 else
419 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
422 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
424 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
425 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
427 if ((retval = jtag_execute_queue()) != ERROR_OK)
429 LOG_ERROR("JTAG error while reading TX");
430 return ERROR_TARGET_TIMEOUT;
433 gettimeofday(&now, NULL);
434 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
436 LOG_ERROR("time out reading TX register");
437 return ERROR_TARGET_TIMEOUT;
439 if (!((!(field0_in & 1)) && consume))
441 goto done;
443 if (debug_level >= 3)
445 LOG_DEBUG("waiting 100ms");
446 alive_sleep(100); /* avoid flooding the logs */
447 } else
449 keep_alive();
452 done:
454 if (!(field0_in & 1))
455 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
457 return ERROR_OK;
460 static int xscale_write_rx(struct target *target)
462 struct xscale_common *xscale = target_to_xscale(target);
463 int retval;
464 struct timeval timeout, now;
465 struct scan_field fields[3];
466 uint8_t field0_out = 0x0;
467 uint8_t field0_in = 0x0;
468 uint8_t field0_check_value = 0x2;
469 uint8_t field0_check_mask = 0x6;
470 uint8_t field2 = 0x0;
471 uint8_t field2_check_value = 0x0;
472 uint8_t field2_check_mask = 0x1;
474 jtag_set_end_state(TAP_IDLE);
476 xscale_jtag_set_instr(target->tap,
477 XSCALE_DBGRX << xscale->xscale_variant);
479 memset(&fields, 0, sizeof fields);
481 fields[0].tap = target->tap;
482 fields[0].num_bits = 3;
483 fields[0].out_value = &field0_out;
484 fields[0].in_value = &field0_in;
486 fields[1].tap = target->tap;
487 fields[1].num_bits = 32;
488 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
490 fields[2].tap = target->tap;
491 fields[2].num_bits = 1;
492 fields[2].out_value = &field2;
493 uint8_t tmp;
494 fields[2].in_value = &tmp;
496 gettimeofday(&timeout, NULL);
497 timeval_add_time(&timeout, 1, 0);
499 /* poll until rx_read is low */
500 LOG_DEBUG("polling RX");
501 for (;;)
503 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
505 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
506 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
508 if ((retval = jtag_execute_queue()) != ERROR_OK)
510 LOG_ERROR("JTAG error while writing RX");
511 return retval;
514 gettimeofday(&now, NULL);
515 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
517 LOG_ERROR("time out writing RX register");
518 return ERROR_TARGET_TIMEOUT;
520 if (!(field0_in & 1))
521 goto done;
522 if (debug_level >= 3)
524 LOG_DEBUG("waiting 100ms");
525 alive_sleep(100); /* avoid flooding the logs */
526 } else
528 keep_alive();
531 done:
533 /* set rx_valid */
534 field2 = 0x1;
535 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
537 if ((retval = jtag_execute_queue()) != ERROR_OK)
539 LOG_ERROR("JTAG error while writing RX");
540 return retval;
543 return ERROR_OK;
546 /* send count elements of size byte to the debug handler */
547 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
549 struct xscale_common *xscale = target_to_xscale(target);
550 uint32_t t[3];
551 int bits[3];
552 int retval;
553 int done_count = 0;
555 jtag_set_end_state(TAP_IDLE);
557 xscale_jtag_set_instr(target->tap,
558 XSCALE_DBGRX << xscale->xscale_variant);
560 bits[0]=3;
561 t[0]=0;
562 bits[1]=32;
563 t[2]=1;
564 bits[2]=1;
565 int endianness = target->endianness;
566 while (done_count++ < count)
568 switch (size)
570 case 4:
571 if (endianness == TARGET_LITTLE_ENDIAN)
573 t[1]=le_to_h_u32(buffer);
574 } else
576 t[1]=be_to_h_u32(buffer);
578 break;
579 case 2:
580 if (endianness == TARGET_LITTLE_ENDIAN)
582 t[1]=le_to_h_u16(buffer);
583 } else
585 t[1]=be_to_h_u16(buffer);
587 break;
588 case 1:
589 t[1]=buffer[0];
590 break;
591 default:
592 LOG_ERROR("BUG: size neither 4, 2 nor 1");
593 return ERROR_INVALID_ARGUMENTS;
595 jtag_add_dr_out(target->tap,
597 bits,
599 jtag_set_end_state(TAP_IDLE));
600 buffer += size;
603 if ((retval = jtag_execute_queue()) != ERROR_OK)
605 LOG_ERROR("JTAG error while sending data to debug handler");
606 return retval;
609 return ERROR_OK;
612 static int xscale_send_u32(struct target *target, uint32_t value)
614 struct xscale_common *xscale = target_to_xscale(target);
616 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
617 return xscale_write_rx(target);
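
/* The 32-bit words pushed to RX via xscale_send_u32() are commands for the
 * debug handler running from the mini-ICache.  A sketch (not compiled) of the
 * command bytes that appear in this file; the symbolic names are made up here
 * for illustration, they are not taken from the handler source.
 */
#if 0
enum xscale_dbg_handler_cmd {
	DBGH_CMD_GET_BANKED_REGS   = 0x00,	/* request banked registers */
	DBGH_CMD_SET_BANKED_REGS   = 0x01,	/* send banked registers */
	DBGH_CMD_READ_MEM_BASE     = 0x10,	/* ORed with access size (1, 2 or 4) */
	DBGH_CMD_WRITE_MEM_BASE    = 0x20,	/* ORed with access size (1, 2 or 4) */
	DBGH_CMD_RESUME            = 0x30,
	DBGH_CMD_RESUME_TRACE      = 0x31,	/* resume with trace buffer enabled */
	DBGH_CMD_CLEAN_DCACHE      = 0x50,
	DBGH_CMD_INVALIDATE_DCACHE = 0x51,
	DBGH_CMD_INVALIDATE_ICACHE = 0x52,
	DBGH_CMD_CPWAIT            = 0x53,
	DBGH_CMD_CLEAR_SA          = 0x60,	/* clear DCSR sticky abort */
	DBGH_CMD_CLEAN_TRACE_BUF   = 0x62,
};
#endif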
620 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
622 struct xscale_common *xscale = target_to_xscale(target);
623 int retval;
624 struct scan_field fields[3];
625 uint8_t field0 = 0x0;
626 uint8_t field0_check_value = 0x2;
627 uint8_t field0_check_mask = 0x7;
628 uint8_t field2 = 0x0;
629 uint8_t field2_check_value = 0x0;
630 uint8_t field2_check_mask = 0x1;
632 if (hold_rst != -1)
633 xscale->hold_rst = hold_rst;
635 if (ext_dbg_brk != -1)
636 xscale->external_debug_break = ext_dbg_brk;
638 jtag_set_end_state(TAP_IDLE);
639 xscale_jtag_set_instr(target->tap,
640 XSCALE_SELDCSR << xscale->xscale_variant);
642 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
643 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
645 memset(&fields, 0, sizeof fields);
647 fields[0].tap = target->tap;
648 fields[0].num_bits = 3;
649 fields[0].out_value = &field0;
650 uint8_t tmp;
651 fields[0].in_value = &tmp;
653 fields[1].tap = target->tap;
654 fields[1].num_bits = 32;
655 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
657 fields[2].tap = target->tap;
658 fields[2].num_bits = 1;
659 fields[2].out_value = &field2;
660 uint8_t tmp2;
661 fields[2].in_value = &tmp2;
663 jtag_add_dr_scan(3, fields, jtag_get_end_state());
665 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
666 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
668 if ((retval = jtag_execute_queue()) != ERROR_OK)
670 LOG_ERROR("JTAG error while writing DCSR");
671 return retval;
674 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
675 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
677 return ERROR_OK;
680 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
681 static unsigned int parity (unsigned int v)
683 // unsigned int ov = v;
684 v ^= v >> 16;
685 v ^= v >> 8;
686 v ^= v >> 4;
687 v &= 0xf;
688 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
689 return (0x6996 >> v) & 1;
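
/* A sketch (not compiled) of an equivalent bit-counting reference, plus a
 * worked example of the 0x6996 trick above: 0x6996 packs the parity of each
 * nibble value 0..15 into one 16-bit constant, so after folding v down to a
 * nibble, "(0x6996 >> v) & 1" is the parity.  E.g. v = 0xB has three bits set;
 * bit 11 of 0x6996 is 1, so parity(0xB) == 1.
 */
#if 0
static unsigned int parity_reference(unsigned int v)
{
	unsigned int p = 0;

	while (v) {
		p ^= v & 1;	/* toggle once per set bit */
		v >>= 1;
	}
	return p;		/* 0 for an even number of set bits, 1 for odd */
}
#endif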
692 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
694 struct xscale_common *xscale = target_to_xscale(target);
695 uint8_t packet[4];
696 uint8_t cmd;
697 int word;
698 struct scan_field fields[2];
700 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
702 /* LDIC into IR */
703 jtag_set_end_state(TAP_IDLE);
704 xscale_jtag_set_instr(target->tap,
705 XSCALE_LDIC << xscale->xscale_variant);
707 /* CMD is b011 to load a cacheline into the Mini ICache.
708 * Loading into the main ICache is deprecated, and unused.
709 * It's followed by three zero bits, and 27 address bits.
711 buf_set_u32(&cmd, 0, 6, 0x3);
713 /* virtual address of desired cache line */
714 buf_set_u32(packet, 0, 27, va >> 5);
716 memset(&fields, 0, sizeof fields);
718 fields[0].tap = target->tap;
719 fields[0].num_bits = 6;
720 fields[0].out_value = &cmd;
722 fields[1].tap = target->tap;
723 fields[1].num_bits = 27;
724 fields[1].out_value = packet;
726 jtag_add_dr_scan(2, fields, jtag_get_end_state());
728 /* rest of packet is a cacheline: 8 instructions, with parity */
729 fields[0].num_bits = 32;
730 fields[0].out_value = packet;
732 fields[1].num_bits = 1;
733 fields[1].out_value = &cmd;
735 for (word = 0; word < 8; word++)
737 buf_set_u32(packet, 0, 32, buffer[word]);
739 uint32_t value;
740 memcpy(&value, packet, sizeof(uint32_t));
741 cmd = parity(value);
743 jtag_add_dr_scan(2, fields, jtag_get_end_state());
746 return jtag_execute_queue();
749 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
751 struct xscale_common *xscale = target_to_xscale(target);
752 uint8_t packet[4];
753 uint8_t cmd;
754 struct scan_field fields[2];
756 jtag_set_end_state(TAP_IDLE);
757 xscale_jtag_set_instr(target->tap,
758 XSCALE_LDIC << xscale->xscale_variant);
760 /* CMD for invalidate IC line b000, bits [6:4] b000 */
761 buf_set_u32(&cmd, 0, 6, 0x0);
763 /* virtual address of desired cache line */
764 buf_set_u32(packet, 0, 27, va >> 5);
766 memset(&fields, 0, sizeof fields);
768 fields[0].tap = target->tap;
769 fields[0].num_bits = 6;
770 fields[0].out_value = &cmd;
772 fields[1].tap = target->tap;
773 fields[1].num_bits = 27;
774 fields[1].out_value = packet;
776 jtag_add_dr_scan(2, fields, jtag_get_end_state());
778 return ERROR_OK;
781 static int xscale_update_vectors(struct target *target)
783 struct xscale_common *xscale = target_to_xscale(target);
784 int i;
785 int retval;
787 uint32_t low_reset_branch, high_reset_branch;
789 for (i = 1; i < 8; i++)
791 /* if there's a static vector specified for this exception, override */
792 if (xscale->static_high_vectors_set & (1 << i))
794 xscale->high_vectors[i] = xscale->static_high_vectors[i];
796 else
798 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
799 if (retval == ERROR_TARGET_TIMEOUT)
800 return retval;
801 if (retval != ERROR_OK)
803 /* Some of these reads will fail as part of normal execution */
804 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
809 for (i = 1; i < 8; i++)
811 if (xscale->static_low_vectors_set & (1 << i))
813 xscale->low_vectors[i] = xscale->static_low_vectors[i];
815 else
817 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
818 if (retval == ERROR_TARGET_TIMEOUT)
819 return retval;
820 if (retval != ERROR_OK)
822 /* Some of these reads will fail as part of normal execution */
823 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
828 /* calculate branches to debug handler */
829 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
830 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
832 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
833 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
835 /* invalidate and load exception vectors in mini i-cache */
836 xscale_invalidate_ic_line(target, 0x0);
837 xscale_invalidate_ic_line(target, 0xffff0000);
839 xscale_load_ic(target, 0x0, xscale->low_vectors);
840 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
842 return ERROR_OK;
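
/* Worked example (not compiled) of the reset-vector branch computed above,
 * assuming a hypothetical handler_address of 0xfe000800:
 */
#if 0
uint32_t handler_address = 0xfe000800;	/* assumption, for illustration only */
uint32_t low_reset_branch = (handler_address + 0x20 - 0x0 - 0x8) >> 2;
/* low_reset_branch == 0x3f800206; ARMV4_5_B() keeps only the low 24 bits
 * (0x800206), which the CPU sign-extends, shifts left by two and adds to
 * PC + 8 -- the "- 0x8" above compensates for that pipeline offset -- so the
 * branch placed at vector 0x0 lands on handler_address + 0x20.
 */
#endif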
845 static int xscale_arch_state(struct target *target)
847 struct xscale_common *xscale = target_to_xscale(target);
848 struct arm *armv4_5 = &xscale->armv4_5_common;
850 static const char *state[] =
852 "disabled", "enabled"
855 static const char *arch_dbg_reason[] =
857 "", "\n(processor reset)", "\n(trace buffer full)"
860 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
862 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
863 return ERROR_INVALID_ARGUMENTS;
866 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
867 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
868 "MMU: %s, D-Cache: %s, I-Cache: %s"
869 "%s",
870 armv4_5_state_strings[armv4_5->core_state],
871 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
872 arm_mode_name(armv4_5->core_mode),
873 buf_get_u32(armv4_5->cpsr->value, 0, 32),
874 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
875 state[xscale->armv4_5_mmu.mmu_enabled],
876 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
877 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
878 arch_dbg_reason[xscale->arch_debug_reason]);
880 return ERROR_OK;
883 static int xscale_poll(struct target *target)
885 int retval = ERROR_OK;
887 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
889 enum target_state previous_state = target->state;
890 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
893 /* there's data to read from the TX register, so we entered debug state */
894 target->state = TARGET_HALTED;
896 /* process debug entry, fetching current mode regs */
897 retval = xscale_debug_entry(target);
899 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
901 LOG_USER("error while polling TX register, reset CPU");
902 /* here we "lie" so GDB won't get stuck and a reset can be performed */
903 target->state = TARGET_HALTED;
906 /* debug_entry could have overwritten target state (i.e. immediate resume)
907 * don't signal event handlers in that case
909 if (target->state != TARGET_HALTED)
910 return ERROR_OK;
912 /* if target was running, signal that we halted
913 * otherwise we reentered from debug execution */
914 if (previous_state == TARGET_RUNNING)
915 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
916 else
917 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
920 return retval;
923 static int xscale_debug_entry(struct target *target)
925 struct xscale_common *xscale = target_to_xscale(target);
926 struct arm *armv4_5 = &xscale->armv4_5_common;
927 uint32_t pc;
928 uint32_t buffer[10];
929 int i;
930 int retval;
931 uint32_t moe;
933 /* clear external dbg break (will be written on next DCSR read) */
934 xscale->external_debug_break = 0;
935 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
936 return retval;
938 /* get r0, pc, r1 to r7 and cpsr */
939 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
940 return retval;
942 /* move r0 from buffer to register cache */
943 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
944 armv4_5->core_cache->reg_list[0].dirty = 1;
945 armv4_5->core_cache->reg_list[0].valid = 1;
946 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
948 /* move pc from buffer to register cache */
949 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
950 armv4_5->core_cache->reg_list[15].dirty = 1;
951 armv4_5->core_cache->reg_list[15].valid = 1;
952 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
954 /* move data from buffer to register cache */
955 for (i = 1; i <= 7; i++)
957 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
958 armv4_5->core_cache->reg_list[i].dirty = 1;
959 armv4_5->core_cache->reg_list[i].valid = 1;
960 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
963 arm_set_cpsr(armv4_5, buffer[9]);
964 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
966 if (!is_arm_mode(armv4_5->core_mode))
968 target->state = TARGET_UNKNOWN;
969 LOG_ERROR("cpsr contains invalid mode value - communication failure");
970 return ERROR_TARGET_FAILURE;
972 LOG_DEBUG("target entered debug state in %s mode",
973 arm_mode_name(armv4_5->core_mode));
975 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
976 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
978 xscale_receive(target, buffer, 8);
979 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
980 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
981 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
983 else
985 /* r8 to r14, but no spsr */
986 xscale_receive(target, buffer, 7);
989 /* move data from buffer to register cache */
990 for (i = 8; i <= 14; i++)
992 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
993 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
994 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
997 /* examine debug reason */
998 xscale_read_dcsr(target);
999 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1001 /* stored PC (for calculating fixup) */
1002 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1004 switch (moe)
1006 case 0x0: /* Processor reset */
1007 target->debug_reason = DBG_REASON_DBGRQ;
1008 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1009 pc -= 4;
1010 break;
1011 case 0x1: /* Instruction breakpoint hit */
1012 target->debug_reason = DBG_REASON_BREAKPOINT;
1013 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1014 pc -= 4;
1015 break;
1016 case 0x2: /* Data breakpoint hit */
1017 target->debug_reason = DBG_REASON_WATCHPOINT;
1018 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1019 pc -= 4;
1020 break;
1021 case 0x3: /* BKPT instruction executed */
1022 target->debug_reason = DBG_REASON_BREAKPOINT;
1023 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1024 pc -= 4;
1025 break;
1026 case 0x4: /* Ext. debug event */
1027 target->debug_reason = DBG_REASON_DBGRQ;
1028 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1029 pc -= 4;
1030 break;
1031 case 0x5: /* Vector trap occurred */
1032 target->debug_reason = DBG_REASON_BREAKPOINT;
1033 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1034 pc -= 4;
1035 break;
1036 case 0x6: /* Trace buffer full break */
1037 target->debug_reason = DBG_REASON_DBGRQ;
1038 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1039 pc -= 4;
1040 break;
1041 case 0x7: /* Reserved (may flag Hot-Debug support) */
1042 default:
1043 LOG_ERROR("Method of Entry is 'Reserved'");
1044 exit(-1);
1045 break;
1048 /* apply PC fixup */
1049 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1051 /* on the first debug entry, identify cache type */
1052 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1054 uint32_t cache_type_reg;
1056 /* read cp15 cache type register */
1057 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1058 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1060 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1063 /* examine MMU and Cache settings */
1064 /* read cp15 control register */
1065 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1066 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1067 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1068 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1069 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1071 /* tracing enabled, read collected trace data */
1072 if (xscale->trace.buffer_enabled)
1074 xscale_read_trace(target);
1075 xscale->trace.buffer_fill--;
1077 /* resume if we're still collecting trace data */
1078 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1079 && (xscale->trace.buffer_fill > 0))
1081 xscale_resume(target, 1, 0x0, 1, 0);
1083 else
1085 xscale->trace.buffer_enabled = 0;
1089 return ERROR_OK;
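
/* Summary of the entry exchange above: the debug handler first pushes ten
 * words -- r0, pc, r1..r7, cpsr -- then either eight more (r8..r14 plus the
 * SPSR) or seven (r8..r14 only, in USR/SYS mode), and DCSR bits [4:2] (the
 * MOE field) encode why execution stopped.
 */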
1092 static int xscale_halt(struct target *target)
1094 struct xscale_common *xscale = target_to_xscale(target);
1096 LOG_DEBUG("target->state: %s",
1097 target_state_name(target));
1099 if (target->state == TARGET_HALTED)
1101 LOG_DEBUG("target was already halted");
1102 return ERROR_OK;
1104 else if (target->state == TARGET_UNKNOWN)
1106 /* this must not happen for an XScale target */
1107 LOG_ERROR("target was in unknown state when halt was requested");
1108 return ERROR_TARGET_INVALID;
1110 else if (target->state == TARGET_RESET)
1112 LOG_DEBUG("target->state == TARGET_RESET");
1114 else
1116 /* assert external dbg break */
1117 xscale->external_debug_break = 1;
1118 xscale_read_dcsr(target);
1120 target->debug_reason = DBG_REASON_DBGRQ;
1123 return ERROR_OK;
1126 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1128 struct xscale_common *xscale = target_to_xscale(target);
1129 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1130 int retval;
1132 if (xscale->ibcr0_used)
1134 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1136 if (ibcr0_bp)
1138 xscale_unset_breakpoint(target, ibcr0_bp);
1140 else
1142 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1143 exit(-1);
1147 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1148 return retval;
1150 return ERROR_OK;
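
/* Note: single-stepping is emulated here by arming IBCR0 as a one-shot
 * hardware breakpoint on next_pc, the address that arm_simulate_step()
 * predicts will execute next (see xscale_step_inner() below).
 */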
1153 static int xscale_disable_single_step(struct target *target)
1155 struct xscale_common *xscale = target_to_xscale(target);
1156 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1157 int retval;
1159 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1160 return retval;
1162 return ERROR_OK;
1165 static void xscale_enable_watchpoints(struct target *target)
1167 struct watchpoint *watchpoint = target->watchpoints;
1169 while (watchpoint)
1171 if (watchpoint->set == 0)
1172 xscale_set_watchpoint(target, watchpoint);
1173 watchpoint = watchpoint->next;
1177 static void xscale_enable_breakpoints(struct target *target)
1179 struct breakpoint *breakpoint = target->breakpoints;
1181 /* set any pending breakpoints */
1182 while (breakpoint)
1184 if (breakpoint->set == 0)
1185 xscale_set_breakpoint(target, breakpoint);
1186 breakpoint = breakpoint->next;
1190 static int xscale_resume(struct target *target, int current,
1191 uint32_t address, int handle_breakpoints, int debug_execution)
1193 struct xscale_common *xscale = target_to_xscale(target);
1194 struct arm *armv4_5 = &xscale->armv4_5_common;
1195 struct breakpoint *breakpoint = target->breakpoints;
1196 uint32_t current_pc;
1197 int retval;
1198 int i;
1200 LOG_DEBUG("-");
1202 if (target->state != TARGET_HALTED)
1204 LOG_WARNING("target not halted");
1205 return ERROR_TARGET_NOT_HALTED;
1208 if (!debug_execution)
1210 target_free_all_working_areas(target);
1213 /* update vector tables */
1214 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1215 return retval;
1217 /* current = 1: continue on current pc, otherwise continue at <address> */
1218 if (!current)
1219 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1221 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1223 /* if we're at the reset vector, we have to simulate the branch */
1224 if (current_pc == 0x0)
1226 arm_simulate_step(target, NULL);
1227 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1230 /* the front-end may request us not to handle breakpoints */
1231 if (handle_breakpoints)
1233 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1235 uint32_t next_pc;
1237 /* there's a breakpoint at the current PC, we have to step over it */
1238 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1239 xscale_unset_breakpoint(target, breakpoint);
1241 /* calculate PC of next instruction */
1242 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1244 uint32_t current_opcode;
1245 target_read_u32(target, current_pc, &current_opcode);
1246 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1249 LOG_DEBUG("enable single-step");
1250 xscale_enable_single_step(target, next_pc);
1252 /* restore banked registers */
1253 xscale_restore_context(target);
1255 /* send resume request (command 0x30 or 0x31)
1256 * clean the trace buffer if it is to be enabled (0x62) */
1257 if (xscale->trace.buffer_enabled)
1259 xscale_send_u32(target, 0x62);
1260 xscale_send_u32(target, 0x31);
1262 else
1263 xscale_send_u32(target, 0x30);
1265 /* send CPSR */
1266 xscale_send_u32(target,
1267 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1268 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1269 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1271 for (i = 7; i >= 0; i--)
1273 /* send register */
1274 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1275 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1278 /* send PC */
1279 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1280 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1282 /* wait for and process debug entry */
1283 xscale_debug_entry(target);
1285 LOG_DEBUG("disable single-step");
1286 xscale_disable_single_step(target);
1288 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1289 xscale_set_breakpoint(target, breakpoint);
1293 /* enable any pending breakpoints and watchpoints */
1294 xscale_enable_breakpoints(target);
1295 xscale_enable_watchpoints(target);
1297 /* restore banked registers */
1298 xscale_restore_context(target);
1300 /* send resume request (command 0x30 or 0x31)
1301 * clean the trace buffer if it is to be enabled (0x62) */
1302 if (xscale->trace.buffer_enabled)
1304 xscale_send_u32(target, 0x62);
1305 xscale_send_u32(target, 0x31);
1307 else
1308 xscale_send_u32(target, 0x30);
1310 /* send CPSR */
1311 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1312 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1313 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1315 for (i = 7; i >= 0; i--)
1317 /* send register */
1318 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1319 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1322 /* send PC */
1323 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1324 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1326 target->debug_reason = DBG_REASON_NOTHALTED;
1328 if (!debug_execution)
1330 /* registers are now invalid */
1331 register_cache_invalidate(armv4_5->core_cache);
1332 target->state = TARGET_RUNNING;
1333 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1335 else
1337 target->state = TARGET_DEBUG_RUNNING;
1338 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1341 LOG_DEBUG("target resumed");
1343 return ERROR_OK;
1346 static int xscale_step_inner(struct target *target, int current,
1347 uint32_t address, int handle_breakpoints)
1349 struct xscale_common *xscale = target_to_xscale(target);
1350 struct arm *armv4_5 = &xscale->armv4_5_common;
1351 uint32_t next_pc;
1352 int retval;
1353 int i;
1355 target->debug_reason = DBG_REASON_SINGLESTEP;
1357 /* calculate PC of next instruction */
1358 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1360 uint32_t current_opcode, current_pc;
1361 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1363 target_read_u32(target, current_pc, &current_opcode);
1364 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1365 return retval;
1368 LOG_DEBUG("enable single-step");
1369 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1370 return retval;
1372 /* restore banked registers */
1373 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1374 return retval;
1376 /* send resume request (command 0x30 or 0x31)
1377 * clean the trace buffer if it is to be enabled (0x62) */
1378 if (xscale->trace.buffer_enabled)
1380 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1381 return retval;
1382 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1383 return retval;
1385 else
1386 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1387 return retval;
1389 /* send CPSR */
1390 retval = xscale_send_u32(target,
1391 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1392 if (retval != ERROR_OK)
1393 return retval;
1394 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1395 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1397 for (i = 7; i >= 0; i--)
1399 /* send register */
1400 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1401 return retval;
1402 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1405 /* send PC */
1406 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1407 return retval;
1408 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1410 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1412 /* registers are now invalid */
1413 register_cache_invalidate(armv4_5->core_cache);
1415 /* wait for and process debug entry */
1416 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1417 return retval;
1419 LOG_DEBUG("disable single-step");
1420 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1421 return retval;
1423 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1425 return ERROR_OK;
1428 static int xscale_step(struct target *target, int current,
1429 uint32_t address, int handle_breakpoints)
1431 struct arm *armv4_5 = target_to_armv4_5(target);
1432 struct breakpoint *breakpoint = target->breakpoints;
1434 uint32_t current_pc;
1435 int retval;
1437 if (target->state != TARGET_HALTED)
1439 LOG_WARNING("target not halted");
1440 return ERROR_TARGET_NOT_HALTED;
1443 /* current = 1: continue on current pc, otherwise continue at <address> */
1444 if (!current)
1445 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1447 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449 /* if we're at the reset vector, we have to simulate the step */
1450 if (current_pc == 0x0)
1452 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1453 return retval;
1454 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1456 target->debug_reason = DBG_REASON_SINGLESTEP;
1457 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1459 return ERROR_OK;
1462 /* the front-end may request us not to handle breakpoints */
1463 if (handle_breakpoints)
1464 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1466 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1467 return retval;
1470 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1472 if (breakpoint)
1474 xscale_set_breakpoint(target, breakpoint);
1477 LOG_DEBUG("target stepped");
1479 return ERROR_OK;
1483 static int xscale_assert_reset(struct target *target)
1485 struct xscale_common *xscale = target_to_xscale(target);
1487 LOG_DEBUG("target->state: %s",
1488 target_state_name(target));
1490 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1491 * end up in T-L-R, which would reset JTAG)
1493 jtag_set_end_state(TAP_IDLE);
1494 xscale_jtag_set_instr(target->tap,
1495 XSCALE_SELDCSR << xscale->xscale_variant);
1497 /* set Hold reset, Halt mode and Trap Reset */
1498 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1499 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1500 xscale_write_dcsr(target, 1, 0);
1502 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1503 xscale_jtag_set_instr(target->tap, 0x7f);
1504 jtag_execute_queue();
1506 /* assert reset */
1507 jtag_add_reset(0, 1);
1509 /* sleep 1ms, to be sure we fulfill any requirements */
1510 jtag_add_sleep(1000);
1511 jtag_execute_queue();
1513 target->state = TARGET_RESET;
1515 if (target->reset_halt)
1517 int retval;
1518 if ((retval = target_halt(target)) != ERROR_OK)
1519 return retval;
1522 return ERROR_OK;
1525 static int xscale_deassert_reset(struct target *target)
1527 struct xscale_common *xscale = target_to_xscale(target);
1528 struct breakpoint *breakpoint = target->breakpoints;
1530 LOG_DEBUG("-");
1532 xscale->ibcr_available = 2;
1533 xscale->ibcr0_used = 0;
1534 xscale->ibcr1_used = 0;
1536 xscale->dbr_available = 2;
1537 xscale->dbr0_used = 0;
1538 xscale->dbr1_used = 0;
1540 /* mark all hardware breakpoints as unset */
1541 while (breakpoint)
1543 if (breakpoint->type == BKPT_HARD)
1545 breakpoint->set = 0;
1547 breakpoint = breakpoint->next;
1550 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1552 /* FIXME: mark hardware watchpoints as unset too. Also,
1553 * at least some of the XScale registers are invalid...
1557 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1558 * contents got invalidated. Safer to force that, so writing new
1559 * contents can't ever fail..
1562 uint32_t address;
1563 unsigned buf_cnt;
1564 const uint8_t *buffer = xscale_debug_handler;
1565 int retval;
1567 /* release SRST */
1568 jtag_add_reset(0, 0);
1570 /* wait 300ms; 150 and 100ms were not enough */
1571 jtag_add_sleep(300*1000);
1573 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1574 jtag_execute_queue();
1576 /* set Hold reset, Halt mode and Trap Reset */
1577 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1578 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1579 xscale_write_dcsr(target, 1, 0);
1581 /* Load the debug handler into the mini-icache. Since
1582 * it's using halt mode (not monitor mode), it runs in
1583 * "Special Debug State" for access to registers, memory,
1584 * coprocessors, trace data, etc.
1586 address = xscale->handler_address;
1587 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1588 binary_size > 0;
1589 binary_size -= buf_cnt, buffer += buf_cnt)
1591 uint32_t cache_line[8];
1592 unsigned i;
1594 buf_cnt = binary_size;
1595 if (buf_cnt > 32)
1596 buf_cnt = 32;
1598 for (i = 0; i < buf_cnt; i += 4)
1600 /* convert LE buffer to host-endian uint32_t */
1601 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1604 for (; i < 32; i += 4)
1606 cache_line[i / 4] = 0xe1a08008;	/* "mov r8, r8" -- an ARM NOP used as padding */
1609 /* only load addresses other than the reset vectors */
1610 if ((address % 0x400) != 0x0)
1612 retval = xscale_load_ic(target, address,
1613 cache_line);
1614 if (retval != ERROR_OK)
1615 return retval;
1618 address += buf_cnt;
1621 retval = xscale_load_ic(target, 0x0,
1622 xscale->low_vectors);
1623 if (retval != ERROR_OK)
1624 return retval;
1625 retval = xscale_load_ic(target, 0xffff0000,
1626 xscale->high_vectors);
1627 if (retval != ERROR_OK)
1628 return retval;
1630 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1632 jtag_add_sleep(100000);
1634 /* set Hold reset, Halt mode and Trap Reset */
1635 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1636 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1637 xscale_write_dcsr(target, 1, 0);
1639 /* clear Hold reset to let the target run (should enter debug handler) */
1640 xscale_write_dcsr(target, 0, 1);
1641 target->state = TARGET_RUNNING;
1643 if (!target->reset_halt)
1645 jtag_add_sleep(10000);
1647 /* we should have entered debug now */
1648 xscale_debug_entry(target);
1649 target->state = TARGET_HALTED;
1651 /* resume the target */
1652 xscale_resume(target, 1, 0x0, 1, 0);
1656 return ERROR_OK;
1659 static int xscale_read_core_reg(struct target *target, struct reg *r,
1660 int num, enum armv4_5_mode mode)
1662 /** \todo add debug handler support for core register reads */
1663 LOG_ERROR("not implemented");
1664 return ERROR_OK;
1667 static int xscale_write_core_reg(struct target *target, struct reg *r,
1668 int num, enum armv4_5_mode mode, uint32_t value)
1670 /** \todo add debug handler support for core register writes */
1671 LOG_ERROR("not implemented");
1672 return ERROR_OK;
1675 static int xscale_full_context(struct target *target)
1677 struct arm *armv4_5 = target_to_armv4_5(target);
1679 uint32_t *buffer;
1681 int i, j;
1683 LOG_DEBUG("-");
1685 if (target->state != TARGET_HALTED)
1687 LOG_WARNING("target not halted");
1688 return ERROR_TARGET_NOT_HALTED;
1691 buffer = malloc(4 * 8);
1693 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1694 * we can't enter User mode on an XScale (unpredictable),
1695 * but User shares registers with SYS
1697 for (i = 1; i < 7; i++)
1699 int valid = 1;
1701 /* check if there are invalid registers in the current mode
1703 for (j = 0; j <= 16; j++)
1705 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1706 valid = 0;
1709 if (!valid)
1711 uint32_t tmp_cpsr;
1713 /* request banked registers */
1714 xscale_send_u32(target, 0x0);
1716 tmp_cpsr = 0x0;
1717 tmp_cpsr |= armv4_5_number_to_mode(i);
1718 tmp_cpsr |= 0xc0; /* I/F bits */
1720 /* send CPSR for desired mode */
1721 xscale_send_u32(target, tmp_cpsr);
1723 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1724 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1726 xscale_receive(target, buffer, 8);
1727 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1728 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1729 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1731 else
1733 xscale_receive(target, buffer, 7);
1736 /* move data from buffer to register cache */
1737 for (j = 8; j <= 14; j++)
1739 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1740 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1741 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1746 free(buffer);
1748 return ERROR_OK;
1751 static int xscale_restore_context(struct target *target)
1753 struct arm *armv4_5 = target_to_armv4_5(target);
1755 int i, j;
1757 if (target->state != TARGET_HALTED)
1759 LOG_WARNING("target not halted");
1760 return ERROR_TARGET_NOT_HALTED;
1763 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1764 * we can't enter User mode on an XScale (unpredictable),
1765 * but User shares registers with SYS
1767 for (i = 1; i < 7; i++)
1769 int dirty = 0;
1771 /* check if there are invalid registers in the current mode
1773 for (j = 8; j <= 14; j++)
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1776 dirty = 1;
1779 /* if not USR/SYS, check if the SPSR needs to be written */
1780 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1782 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1783 dirty = 1;
1786 if (dirty)
1788 uint32_t tmp_cpsr;
1790 /* send banked registers */
1791 xscale_send_u32(target, 0x1);
1793 tmp_cpsr = 0x0;
1794 tmp_cpsr |= armv4_5_number_to_mode(i);
1795 tmp_cpsr |= 0xc0; /* I/F bits */
1797 /* send CPSR for desired mode */
1798 xscale_send_u32(target, tmp_cpsr);
1800 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1801 for (j = 8; j <= 14; j++)
1803 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1804 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1807 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1809 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1810 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1815 return ERROR_OK;
1818 static int xscale_read_memory(struct target *target, uint32_t address,
1819 uint32_t size, uint32_t count, uint8_t *buffer)
1821 struct xscale_common *xscale = target_to_xscale(target);
1822 uint32_t *buf32;
1823 uint32_t i;
1824 int retval;
1826 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1828 if (target->state != TARGET_HALTED)
1830 LOG_WARNING("target not halted");
1831 return ERROR_TARGET_NOT_HALTED;
1834 /* sanitize arguments */
1835 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1836 return ERROR_INVALID_ARGUMENTS;
1838 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1839 return ERROR_TARGET_UNALIGNED_ACCESS;
1841 /* send memory read request (command 0x1n, n: access size) */
1842 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1843 return retval;
1845 /* send base address for read request */
1846 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1847 return retval;
1849 /* send number of requested data words */
1850 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1851 return retval;
1853 /* receive data from target (count times 32-bit words in host endianness) */
1854 buf32 = malloc(4 * count);
1855 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1856 return retval;
1858 /* extract data from host-endian buffer into byte stream */
1859 for (i = 0; i < count; i++)
1861 switch (size)
1863 case 4:
1864 target_buffer_set_u32(target, buffer, buf32[i]);
1865 buffer += 4;
1866 break;
1867 case 2:
1868 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1869 buffer += 2;
1870 break;
1871 case 1:
1872 *buffer++ = buf32[i] & 0xff;
1873 break;
1874 default:
1875 LOG_ERROR("invalid read size");
1876 return ERROR_INVALID_ARGUMENTS;
1880 free(buf32);
1882 /* examine DCSR, to see if Sticky Abort (SA) got set */
1883 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1884 return retval;
1885 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1887 /* clear SA bit */
1888 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1889 return retval;
1891 return ERROR_TARGET_DATA_ABORT;
1894 return ERROR_OK;
1897 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1898 uint32_t size, uint32_t count, uint8_t *buffer)
1900 /** \todo: provide a non-stub implementation of this routine. */
1901 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1902 target_name(target), __func__);
1903 return ERROR_FAIL;
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1914 if (target->state != TARGET_HALTED)
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1931 /* send base address for write request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1935 /* send number of requested data words to be written */
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1939 /* extract words from the byte stream (in target endianness) and send them */
1940 #if 0
1941 for (i = 0; i < count; i++)
1943 switch (size)
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1978 return ERROR_TARGET_DATA_ABORT;
1981 return ERROR_OK;
1984 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1985 uint32_t size, uint32_t count, uint8_t *buffer)
1987 /** \todo: provide a non-stub implementation of this routine. */
1988 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1989 target_name(target), __func__);
1990 return ERROR_FAIL;
1993 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1994 uint32_t count, uint8_t *buffer)
1996 return xscale_write_memory(target, address, 4, count, buffer);
1999 static uint32_t xscale_get_ttb(struct target *target)
2001 struct xscale_common *xscale = target_to_xscale(target);
2002 uint32_t ttb;
2004 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2005 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2007 return ttb;
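/* Clear the MMU/cache enable bits in the CP15 control register.  Before the
 * DCache is disabled it is cleaned (debug handler command 0x50, using
 * cache_clean_address) and invalidated (0x51); the ICache is invalidated
 * with 0x52, and CPWAIT (0x53) lets the CP15 writes take effect.
 */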
2010 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2011 int d_u_cache, int i_cache)
2013 struct xscale_common *xscale = target_to_xscale(target);
2014 uint32_t cp15_control;
2016 /* read cp15 control register */
2017 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2018 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2020 if (mmu)
2021 cp15_control &= ~0x1U;
2023 if (d_u_cache)
2025 /* clean DCache */
2026 xscale_send_u32(target, 0x50);
2027 xscale_send_u32(target, xscale->cache_clean_address);
2029 /* invalidate DCache */
2030 xscale_send_u32(target, 0x51);
2032 cp15_control &= ~0x4U;
2035 if (i_cache)
2037 /* invalidate ICache */
2038 xscale_send_u32(target, 0x52);
2039 cp15_control &= ~0x1000U;
2042 /* write new cp15 control register */
2043 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2045 /* execute cpwait to ensure outstanding operations complete */
2046 xscale_send_u32(target, 0x53);
2049 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2050 int d_u_cache, int i_cache)
2052 struct xscale_common *xscale = target_to_xscale(target);
2053 uint32_t cp15_control;
2055 /* read cp15 control register */
2056 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2057 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2059 if (mmu)
2060 cp15_control |= 0x1U;
2062 if (d_u_cache)
2063 cp15_control |= 0x4U;
2065 if (i_cache)
2066 cp15_control |= 0x1000U;
2068 /* write new cp15 control register */
2069 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2071 /* execute cpwait to ensure outstanding operations complete */
2072 xscale_send_u32(target, 0x53);
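/* Program a breakpoint on the target.  Hardware breakpoints claim one of
 * the two IBCR comparators (address | 1 arms the comparator); software
 * breakpoints save the original opcode and replace it with the ARM or
 * Thumb BKPT pattern.
 */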
2075 static int xscale_set_breakpoint(struct target *target,
2076 struct breakpoint *breakpoint)
2078 int retval;
2079 struct xscale_common *xscale = target_to_xscale(target);
2081 if (target->state != TARGET_HALTED)
2083 LOG_WARNING("target not halted");
2084 return ERROR_TARGET_NOT_HALTED;
2087 if (breakpoint->set)
2089 LOG_WARNING("breakpoint already set");
2090 return ERROR_OK;
2093 if (breakpoint->type == BKPT_HARD)
2095 uint32_t value = breakpoint->address | 1;
2096 if (!xscale->ibcr0_used)
2098 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2099 xscale->ibcr0_used = 1;
2100 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2102 else if (!xscale->ibcr1_used)
2104 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2105 xscale->ibcr1_used = 1;
2106 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2108 else
2110 LOG_ERROR("BUG: no hardware comparator available");
2111 return ERROR_OK;
2114 else if (breakpoint->type == BKPT_SOFT)
2116 if (breakpoint->length == 4)
2118 /* keep the original instruction in target endianness */
2119 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2121 return retval;
2123 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2124 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2126 return retval;
2129 else
2131 /* keep the original instruction in target endianness */
2132 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2134 return retval;
2136 /* write the Thumb bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2137 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2139 return retval;
2142 breakpoint->set = 1;
2145 return ERROR_OK;
2148 static int xscale_add_breakpoint(struct target *target,
2149 struct breakpoint *breakpoint)
2151 struct xscale_common *xscale = target_to_xscale(target);
2153 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2155 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2156 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2159 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2161 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2162 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2165 if (breakpoint->type == BKPT_HARD)
2167 xscale->ibcr_available--;
2170 return ERROR_OK;
2173 static int xscale_unset_breakpoint(struct target *target,
2174 struct breakpoint *breakpoint)
2176 int retval;
2177 struct xscale_common *xscale = target_to_xscale(target);
2179 if (target->state != TARGET_HALTED)
2181 LOG_WARNING("target not halted");
2182 return ERROR_TARGET_NOT_HALTED;
2185 if (!breakpoint->set)
2187 LOG_WARNING("breakpoint not set");
2188 return ERROR_OK;
2191 if (breakpoint->type == BKPT_HARD)
2193 if (breakpoint->set == 1)
2195 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2196 xscale->ibcr0_used = 0;
2198 else if (breakpoint->set == 2)
2200 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2201 xscale->ibcr1_used = 0;
2203 breakpoint->set = 0;
2205 else
2207 /* restore original instruction (kept in target endianness) */
2208 if (breakpoint->length == 4)
2210 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2212 return retval;
2215 else
2217 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2219 return retval;
2222 breakpoint->set = 0;
2225 return ERROR_OK;
2228 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2230 struct xscale_common *xscale = target_to_xscale(target);
2232 if (target->state != TARGET_HALTED)
2234 LOG_WARNING("target not halted");
2235 return ERROR_TARGET_NOT_HALTED;
2238 if (breakpoint->set)
2240 xscale_unset_breakpoint(target, breakpoint);
2243 if (breakpoint->type == BKPT_HARD)
2244 xscale->ibcr_available++;
2246 return ERROR_OK;
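/* Program a data watchpoint in DBR0 or DBR1.  The matching two-bit enable
 * field in DBCON encodes the access type as used here: 0x1 = write,
 * 0x2 = any access, 0x3 = read; DBR1's enable field sits two bits higher.
 */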
2249 static int xscale_set_watchpoint(struct target *target,
2250 struct watchpoint *watchpoint)
2252 struct xscale_common *xscale = target_to_xscale(target);
2253 uint8_t enable = 0;
2254 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2255 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2257 if (target->state != TARGET_HALTED)
2259 LOG_WARNING("target not halted");
2260 return ERROR_TARGET_NOT_HALTED;
2263 xscale_get_reg(dbcon);
2265 switch (watchpoint->rw)
2267 case WPT_READ:
2268 enable = 0x3;
2269 break;
2270 case WPT_ACCESS:
2271 enable = 0x2;
2272 break;
2273 case WPT_WRITE:
2274 enable = 0x1;
2275 break;
2276 default:
2277 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2280 if (!xscale->dbr0_used)
2282 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2283 dbcon_value |= enable;
2284 xscale_set_reg_u32(dbcon, dbcon_value);
2285 watchpoint->set = 1;
2286 xscale->dbr0_used = 1;
2288 else if (!xscale->dbr1_used)
2290 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2291 dbcon_value |= enable << 2;
2292 xscale_set_reg_u32(dbcon, dbcon_value);
2293 watchpoint->set = 2;
2294 xscale->dbr1_used = 1;
2296 else
2298 LOG_ERROR("BUG: no hardware comparator available");
2299 return ERROR_OK;
2302 return ERROR_OK;
2305 static int xscale_add_watchpoint(struct target *target,
2306 struct watchpoint *watchpoint)
2308 struct xscale_common *xscale = target_to_xscale(target);
2310 if (xscale->dbr_available < 1)
2312 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2315 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2317 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2320 xscale->dbr_available--;
2322 return ERROR_OK;
2325 static int xscale_unset_watchpoint(struct target *target,
2326 struct watchpoint *watchpoint)
2328 struct xscale_common *xscale = target_to_xscale(target);
2329 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2330 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2332 if (target->state != TARGET_HALTED)
2334 LOG_WARNING("target not halted");
2335 return ERROR_TARGET_NOT_HALTED;
2338 if (!watchpoint->set)
2340 LOG_WARNING("watchpoint not set");
2341 return ERROR_OK;
2344 if (watchpoint->set == 1)
2346 dbcon_value &= ~0x3;
2347 xscale_set_reg_u32(dbcon, dbcon_value);
2348 xscale->dbr0_used = 0;
2350 else if (watchpoint->set == 2)
2352 dbcon_value &= ~0xc;
2353 xscale_set_reg_u32(dbcon, dbcon_value);
2354 xscale->dbr1_used = 0;
2356 watchpoint->set = 0;
2358 return ERROR_OK;
2361 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2363 struct xscale_common *xscale = target_to_xscale(target);
2365 if (target->state != TARGET_HALTED)
2367 LOG_WARNING("target not halted");
2368 return ERROR_TARGET_NOT_HALTED;
2371 if (watchpoint->set)
2373 xscale_unset_watchpoint(target, watchpoint);
2376 xscale->dbr_available++;
2378 return ERROR_OK;
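/* Read one XScale debug register into the register cache.  DCSR, TX and RX
 * are reachable directly over JTAG; everything else is fetched through the
 * debug handler (CP read request 0x40 plus the handler's register number).
 */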
2381 static int xscale_get_reg(struct reg *reg)
2383 struct xscale_reg *arch_info = reg->arch_info;
2384 struct target *target = arch_info->target;
2385 struct xscale_common *xscale = target_to_xscale(target);
2387 /* DCSR, TX and RX are accessible via JTAG */
2388 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2390 return xscale_read_dcsr(arch_info->target);
2392 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2394 /* 1 = consume register content */
2395 return xscale_read_tx(arch_info->target, 1);
2397 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2399 /* can't read from RX register (host -> debug handler) */
2400 return ERROR_OK;
2402 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2404 /* can't (explicitly) read from TXRXCTRL register */
2405 return ERROR_OK;
2407 else /* Other DBG registers have to be transferred by the debug handler */
2409 /* send CP read request (command 0x40) */
2410 xscale_send_u32(target, 0x40);
2412 /* send CP register number */
2413 xscale_send_u32(target, arch_info->dbg_handler_number);
2415 /* read register value */
2416 xscale_read_tx(target, 1);
2417 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2419 reg->dirty = 0;
2420 reg->valid = 1;
2423 return ERROR_OK;
2426 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2428 struct xscale_reg *arch_info = reg->arch_info;
2429 struct target *target = arch_info->target;
2430 struct xscale_common *xscale = target_to_xscale(target);
2431 uint32_t value = buf_get_u32(buf, 0, 32);
2433 /* DCSR, TX and RX are accessible via JTAG */
2434 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2436 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2437 return xscale_write_dcsr(arch_info->target, -1, -1);
2439 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2441 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2442 return xscale_write_rx(arch_info->target);
2444 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2446 /* can't write to TX register (debug-handler -> host) */
2447 return ERROR_OK;
2449 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2451 /* can't (explicitly) write to TXRXCTRL register */
2452 return ERROR_OK;
2454 else /* Other DBG registers have to be transferred by the debug handler */
2456 /* send CP write request (command 0x41) */
2457 xscale_send_u32(target, 0x41);
2459 /* send CP register number */
2460 xscale_send_u32(target, arch_info->dbg_handler_number);
2462 /* send CP register value */
2463 xscale_send_u32(target, value);
2464 buf_set_u32(reg->value, 0, 32, value);
2467 return ERROR_OK;
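/* Write DCSR through the debug handler (CP write request 0x41) instead of
 * directly over JTAG, keeping the cached register value in sync.
 */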
2470 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2472 struct xscale_common *xscale = target_to_xscale(target);
2473 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2474 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2476 /* send CP write request (command 0x41) */
2477 xscale_send_u32(target, 0x41);
2479 /* send CP register number */
2480 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2482 /* send CP register value */
2483 xscale_send_u32(target, value);
2484 buf_set_u32(dcsr->value, 0, 32, value);
2486 return ERROR_OK;
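/* Drain the trace buffer: 256 trace entries plus the two checkpoint
 * registers are read from the debug handler, address bytes are flagged,
 * and the result is appended to the target's trace data list.
 */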
2489 static int xscale_read_trace(struct target *target)
2491 struct xscale_common *xscale = target_to_xscale(target);
2492 struct arm *armv4_5 = &xscale->armv4_5_common;
2493 struct xscale_trace_data **trace_data_p;
2495 /* 258 words from debug handler
2496 * 256 trace buffer entries
2497 * 2 checkpoint addresses
2499 uint32_t trace_buffer[258];
2500 int is_address[256];
2501 int i, j;
2503 if (target->state != TARGET_HALTED)
2505 LOG_WARNING("target must be stopped to read trace data");
2506 return ERROR_TARGET_NOT_HALTED;
2509 /* send read trace buffer command (command 0x61) */
2510 xscale_send_u32(target, 0x61);
2512 /* receive trace buffer content */
2513 xscale_receive(target, trace_buffer, 258);
2515 /* parse buffer backwards to identify address entries */
2516 for (i = 255; i >= 0; i--)
2518 is_address[i] = 0;
2519 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2520 ((trace_buffer[i] & 0xf0) == 0xd0))
2522 if (i >= 3)
2523 is_address[--i] = 1;
2524 if (i >= 2)
2525 is_address[--i] = 1;
2526 if (i >= 1)
2527 is_address[--i] = 1;
2528 if (i >= 0)
2529 is_address[--i] = 1;
2534 /* search first non-zero entry */
2535 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2538 if (j == 256)
2540 LOG_DEBUG("no trace data collected");
2541 return ERROR_XSCALE_NO_TRACE_DATA;
2544 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2547 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2548 (*trace_data_p)->next = NULL;
2549 (*trace_data_p)->chkpt0 = trace_buffer[256];
2550 (*trace_data_p)->chkpt1 = trace_buffer[257];
2551 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2552 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2553 (*trace_data_p)->depth = 256 - j;
2555 for (i = j; i < 256; i++)
2557 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2558 if (is_address[i])
2559 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2560 else
2561 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2564 return ERROR_OK;
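/* Fetch and disassemble the instruction at trace.current_pc from the image
 * loaded with "xscale trace_image", honoring the recorded ARM/Thumb state.
 */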
2567 static int xscale_read_instruction(struct target *target,
2568 struct arm_instruction *instruction)
2570 struct xscale_common *xscale = target_to_xscale(target);
2571 int i;
2572 int section = -1;
2573 size_t size_read;
2574 uint32_t opcode;
2575 int retval;
2577 if (!xscale->trace.image)
2578 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2580 /* search for the section the current instruction belongs to */
2581 for (i = 0; i < xscale->trace.image->num_sections; i++)
2583 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2584 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2586 section = i;
2587 break;
2591 if (section == -1)
2593 /* current instruction couldn't be found in the image */
2594 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2597 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2599 uint8_t buf[4];
2600 if ((retval = image_read_section(xscale->trace.image, section,
2601 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2602 4, buf, &size_read)) != ERROR_OK)
2604 LOG_ERROR("error while reading instruction: %i", retval);
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2607 opcode = target_buffer_get_u32(target, buf);
2608 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2610 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2612 uint8_t buf[2];
2613 if ((retval = image_read_section(xscale->trace.image, section,
2614 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2615 2, buf, &size_read)) != ERROR_OK)
2617 LOG_ERROR("error while reading instruction: %i", retval);
2618 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2620 opcode = target_buffer_get_u16(target, buf);
2621 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2623 else
2625 LOG_ERROR("BUG: unknown core state encountered");
2626 exit(-1);
2629 return ERROR_OK;
2632 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2633 int i, uint32_t *target)
2635 /* if there are fewer than four entries prior to the indirect branch message
2636 * we can't extract the address */
2637 if (i < 4)
2639 return -1;
2642 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2643 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2645 return 0;
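/* Replay the collected trace data: branch and exception messages update the
 * reconstructed PC, while the low nibble of each message (extended by
 * roll-over messages) gives the number of instructions executed since the
 * previous message.
 */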
2648 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2650 struct xscale_common *xscale = target_to_xscale(target);
2651 int next_pc_ok = 0;
2652 uint32_t next_pc = 0x0;
2653 struct xscale_trace_data *trace_data = xscale->trace.data;
2654 int retval;
2656 while (trace_data)
2658 int i, chkpt;
2659 int rollover;
2660 int branch;
2661 int exception;
2662 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2664 chkpt = 0;
2665 rollover = 0;
2667 for (i = 0; i < trace_data->depth; i++)
2669 next_pc_ok = 0;
2670 branch = 0;
2671 exception = 0;
2673 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2674 continue;
2676 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2678 case 0: /* Exceptions */
2679 case 1:
2680 case 2:
2681 case 3:
2682 case 4:
2683 case 5:
2684 case 6:
2685 case 7:
2686 exception = (trace_data->entries[i].data & 0x70) >> 4;
2687 next_pc_ok = 1;
2688 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2689 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2690 break;
2691 case 8: /* Direct Branch */
2692 branch = 1;
2693 break;
2694 case 9: /* Indirect Branch */
2695 branch = 1;
2696 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2698 next_pc_ok = 1;
2700 break;
2701 case 13: /* Checkpointed Indirect Branch */
2702 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2704 next_pc_ok = 1;
2705 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2706 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2707 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2709 /* explicit fall-through */
2710 case 12: /* Checkpointed Direct Branch */
2711 branch = 1;
2712 if (chkpt == 0)
2714 next_pc_ok = 1;
2715 next_pc = trace_data->chkpt0;
2716 chkpt++;
2718 else if (chkpt == 1)
2720 next_pc_ok = 1;
2721 next_pc = trace_data->chkpt1;
2722 chkpt++;
2724 else
2726 LOG_WARNING("more than two checkpointed branches encountered");
2728 break;
2729 case 15: /* Roll-over */
2730 rollover++;
2731 continue;
2732 default: /* Reserved */
2733 command_print(cmd_ctx, "--- reserved trace message ---");
2734 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2735 return ERROR_OK;
2738 if (xscale->trace.pc_ok)
2740 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2741 struct arm_instruction instruction;
2743 if ((exception == 6) || (exception == 7))
2745 /* IRQ or FIQ exception, no instruction executed */
2746 executed -= 1;
2749 while (executed-- >= 0)
2751 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2753 /* can't continue tracing with no image available */
2754 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2756 return retval;
2758 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2760 /* TODO: handle incomplete images */
2764 /* a precise abort on a load to the PC is included in the incremental
2765 * word count, other instructions causing data aborts are not included
2767 if ((executed == 0) && (exception == 4)
2768 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2770 if ((instruction.type == ARM_LDM)
2771 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2773 executed--;
2775 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2776 && (instruction.info.load_store.Rd != 15))
2778 executed--;
2782 /* only the last instruction executed
2783 * (the one that caused the control flow change)
2784 * could be a taken branch
2786 if (((executed == -1) && (branch == 1)) &&
2787 (((instruction.type == ARM_B) ||
2788 (instruction.type == ARM_BL) ||
2789 (instruction.type == ARM_BLX)) &&
2790 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2792 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2794 else
2796 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2798 command_print(cmd_ctx, "%s", instruction.text);
2801 rollover = 0;
2804 if (next_pc_ok)
2806 xscale->trace.current_pc = next_pc;
2807 xscale->trace.pc_ok = 1;
2811 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2813 struct arm_instruction instruction;
2814 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2816 /* can't continue tracing with no image available */
2817 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2819 return retval;
2821 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2823 /* TODO: handle incomplete images */
2826 command_print(cmd_ctx, "%s", instruction.text);
2829 trace_data = trace_data->next;
2832 return ERROR_OK;
2835 static const struct reg_arch_type xscale_reg_type = {
2836 .get = xscale_get_reg,
2837 .set = xscale_set_reg,
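/* Build the register cache: the common ARMv4/5 registers first, then a
 * second cache holding the XScale debug registers described by
 * xscale_reg_arch_info.
 */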
2840 static void xscale_build_reg_cache(struct target *target)
2842 struct xscale_common *xscale = target_to_xscale(target);
2843 struct arm *armv4_5 = &xscale->armv4_5_common;
2844 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2845 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2846 int i;
2847 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2849 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2851 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2852 cache_p = &(*cache_p)->next;
2854 /* fill in values for the xscale reg cache */
2855 (*cache_p)->name = "XScale registers";
2856 (*cache_p)->next = NULL;
2857 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2858 (*cache_p)->num_regs = num_regs;
2860 for (i = 0; i < num_regs; i++)
2862 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2863 (*cache_p)->reg_list[i].value = calloc(4, 1);
2864 (*cache_p)->reg_list[i].dirty = 0;
2865 (*cache_p)->reg_list[i].valid = 0;
2866 (*cache_p)->reg_list[i].size = 32;
2867 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2868 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2869 arch_info[i] = xscale_reg_arch_info[i];
2870 arch_info[i].target = target;
2873 xscale->reg_cache = (*cache_p);
2876 static int xscale_init_target(struct command_context *cmd_ctx,
2877 struct target *target)
2879 xscale_build_reg_cache(target);
2880 return ERROR_OK;
2883 static int xscale_init_arch_info(struct target *target,
2884 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2886 struct arm *armv4_5;
2887 uint32_t high_reset_branch, low_reset_branch;
2888 int i;
2890 armv4_5 = &xscale->armv4_5_common;
2892 /* store architecture specific data */
2893 xscale->common_magic = XSCALE_COMMON_MAGIC;
2895 /* we don't really *need* a variant param ... */
2896 if (variant) {
2897 int ir_length = 0;
2899 if (strcmp(variant, "pxa250") == 0
2900 || strcmp(variant, "pxa255") == 0
2901 || strcmp(variant, "pxa26x") == 0)
2902 ir_length = 5;
2903 else if (strcmp(variant, "pxa27x") == 0
2904 || strcmp(variant, "ixp42x") == 0
2905 || strcmp(variant, "ixp45x") == 0
2906 || strcmp(variant, "ixp46x") == 0)
2907 ir_length = 7;
2908 else if (strcmp(variant, "pxa3xx") == 0)
2909 ir_length = 11;
2910 else
2911 LOG_WARNING("%s: unrecognized variant %s",
2912 tap->dotted_name, variant);
2914 if (ir_length && ir_length != tap->ir_length) {
2915 LOG_WARNING("%s: IR length for %s is %d; fixing",
2916 tap->dotted_name, variant, ir_length);
2917 tap->ir_length = ir_length;
2921 /* PXA3xx shifts the JTAG instructions */
2922 if (tap->ir_length == 11)
2923 xscale->xscale_variant = XSCALE_PXA3XX;
2924 else
2925 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2927 /* the debug handler isn't installed (and thus not running) at this time */
2928 xscale->handler_address = 0xfe000800;
2930 /* clear the vectors we keep locally for reference */
2931 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2932 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2934 /* no user-specified vectors have been configured yet */
2935 xscale->static_low_vectors_set = 0x0;
2936 xscale->static_high_vectors_set = 0x0;
2938 /* calculate branches to debug handler */
2939 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2940 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2942 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2943 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2945 for (i = 1; i <= 7; i++)
2947 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2948 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2951 /* 64kB aligned region used for DCache cleaning */
2952 xscale->cache_clean_address = 0xfffe0000;
2954 xscale->hold_rst = 0;
2955 xscale->external_debug_break = 0;
2957 xscale->ibcr_available = 2;
2958 xscale->ibcr0_used = 0;
2959 xscale->ibcr1_used = 0;
2961 xscale->dbr_available = 2;
2962 xscale->dbr0_used = 0;
2963 xscale->dbr1_used = 0;
2965 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2966 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2968 xscale->vector_catch = 0x1;
2970 xscale->trace.capture_status = TRACE_IDLE;
2971 xscale->trace.data = NULL;
2972 xscale->trace.image = NULL;
2973 xscale->trace.buffer_enabled = 0;
2974 xscale->trace.buffer_fill = 0;
2976 /* prepare ARMv4/5 specific information */
2977 armv4_5->arch_info = xscale;
2978 armv4_5->read_core_reg = xscale_read_core_reg;
2979 armv4_5->write_core_reg = xscale_write_core_reg;
2980 armv4_5->full_context = xscale_full_context;
2982 armv4_5_init_arch_info(target, armv4_5);
2984 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2985 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2986 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2987 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2988 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2989 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2990 xscale->armv4_5_mmu.has_tiny_pages = 1;
2991 xscale->armv4_5_mmu.mmu_enabled = 0;
2993 return ERROR_OK;
2996 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2998 struct xscale_common *xscale;
3000 if (sizeof xscale_debug_handler - 1 > 0x800) {
3001 LOG_ERROR("debug_handler.bin: larger than 2kb");
3002 return ERROR_FAIL;
3005 xscale = calloc(1, sizeof(*xscale));
3006 if (!xscale)
3007 return ERROR_FAIL;
3009 return xscale_init_arch_info(target, xscale, target->tap,
3010 target->variant);
3013 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3015 struct target *target = NULL;
3016 struct xscale_common *xscale;
3017 int retval;
3018 uint32_t handler_address;
3020 if (CMD_ARGC < 2)
3022 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3023 return ERROR_OK;
3026 if ((target = get_target(CMD_ARGV[0])) == NULL)
3028 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3029 return ERROR_FAIL;
3032 xscale = target_to_xscale(target);
3033 retval = xscale_verify_pointer(CMD_CTX, xscale);
3034 if (retval != ERROR_OK)
3035 return retval;
3037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3039 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3040 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3042 xscale->handler_address = handler_address;
3044 else
3046 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3047 return ERROR_FAIL;
3050 return ERROR_OK;
3053 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3055 struct target *target = NULL;
3056 struct xscale_common *xscale;
3057 int retval;
3058 uint32_t cache_clean_address;
3060 if (CMD_ARGC < 2)
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3065 target = get_target(CMD_ARGV[0]);
3066 if (target == NULL)
3068 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3069 return ERROR_FAIL;
3071 xscale = target_to_xscale(target);
3072 retval = xscale_verify_pointer(CMD_CTX, xscale);
3073 if (retval != ERROR_OK)
3074 return retval;
3076 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3078 if (cache_clean_address & 0xffff)
3080 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3082 else
3084 xscale->cache_clean_address = cache_clean_address;
3087 return ERROR_OK;
3090 COMMAND_HANDLER(xscale_handle_cache_info_command)
3092 struct target *target = get_current_target(CMD_CTX);
3093 struct xscale_common *xscale = target_to_xscale(target);
3094 int retval;
3096 retval = xscale_verify_pointer(CMD_CTX, xscale);
3097 if (retval != ERROR_OK)
3098 return retval;
3100 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
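/* Translate a virtual address using the generic ARMv4/5 MMU table walk; if
 * no mapping is found (type == -1) the helper's return value is passed back
 * as the error code.
 */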
3103 static int xscale_virt2phys(struct target *target,
3104 uint32_t virtual, uint32_t *physical)
3106 struct xscale_common *xscale = target_to_xscale(target);
3107 int type;
3108 uint32_t cb;
3109 int domain;
3110 uint32_t ap;
3112 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3113 LOG_ERROR(xscale_not);
3114 return ERROR_TARGET_INVALID;
3117 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3118 if (type == -1)
3120 return ret;
3122 *physical = ret;
3123 return ERROR_OK;
3126 static int xscale_mmu(struct target *target, int *enabled)
3128 struct xscale_common *xscale = target_to_xscale(target);
3130 if (target->state != TARGET_HALTED)
3132 LOG_ERROR("Target not halted");
3133 return ERROR_TARGET_INVALID;
3135 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3136 return ERROR_OK;
3139 COMMAND_HANDLER(xscale_handle_mmu_command)
3141 struct target *target = get_current_target(CMD_CTX);
3142 struct xscale_common *xscale = target_to_xscale(target);
3143 int retval;
3145 retval = xscale_verify_pointer(CMD_CTX, xscale);
3146 if (retval != ERROR_OK)
3147 return retval;
3149 if (target->state != TARGET_HALTED)
3151 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3152 return ERROR_OK;
3155 if (CMD_ARGC >= 1)
3157 bool enable;
3158 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3159 if (enable)
3160 xscale_enable_mmu_caches(target, 1, 0, 0);
3161 else
3162 xscale_disable_mmu_caches(target, 1, 0, 0);
3163 xscale->armv4_5_mmu.mmu_enabled = enable;
3166 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3168 return ERROR_OK;
3171 COMMAND_HANDLER(xscale_handle_idcache_command)
3173 struct target *target = get_current_target(CMD_CTX);
3174 struct xscale_common *xscale = target_to_xscale(target);
3176 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3177 if (retval != ERROR_OK)
3178 return retval;
3180 if (target->state != TARGET_HALTED)
3182 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3183 return ERROR_OK;
3186 bool icache;
3187 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3189 if (CMD_ARGC >= 1)
3191 bool enable;
3192 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3193 if (enable)
3194 xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3195 else
3196 xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3197 if (icache)
3198 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3199 else
3200 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3203 bool enabled = icache ?
3204 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3205 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3206 const char *msg = enabled ? "enabled" : "disabled";
3207 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3209 return ERROR_OK;
3212 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3214 struct target *target = get_current_target(CMD_CTX);
3215 struct xscale_common *xscale = target_to_xscale(target);
3216 int retval;
3218 retval = xscale_verify_pointer(CMD_CTX, xscale);
3219 if (retval != ERROR_OK)
3220 return retval;
3222 if (CMD_ARGC < 1)
3224 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3226 else
3228 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3229 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3230 xscale_write_dcsr(target, -1, -1);
3233 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3235 return ERROR_OK;
3239 COMMAND_HANDLER(xscale_handle_vector_table_command)
3241 struct target *target = get_current_target(CMD_CTX);
3242 struct xscale_common *xscale = target_to_xscale(target);
3243 int err = 0;
3244 int retval;
3246 retval = xscale_verify_pointer(CMD_CTX, xscale);
3247 if (retval != ERROR_OK)
3248 return retval;
3250 if (CMD_ARGC == 0) /* print current settings */
3252 int idx;
3254 command_print(CMD_CTX, "active user-set static vectors:");
3255 for (idx = 1; idx < 8; idx++)
3256 if (xscale->static_low_vectors_set & (1 << idx))
3257 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3258 for (idx = 1; idx < 8; idx++)
3259 if (xscale->static_high_vectors_set & (1 << idx))
3260 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3261 return ERROR_OK;
3264 if (CMD_ARGC != 3)
3265 err = 1;
3266 else
3268 int idx;
3269 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3270 uint32_t vec;
3271 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3273 if (idx < 1 || idx >= 8)
3274 err = 1;
3276 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3278 xscale->static_low_vectors_set |= (1<<idx);
3279 xscale->static_low_vectors[idx] = vec;
3281 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3283 xscale->static_high_vectors_set |= (1<<idx);
3284 xscale->static_high_vectors[idx] = vec;
3286 else
3287 err = 1;
3290 if (err)
3291 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3293 return ERROR_OK;
3297 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3299 struct target *target = get_current_target(CMD_CTX);
3300 struct xscale_common *xscale = target_to_xscale(target);
3301 struct arm *armv4_5 = &xscale->armv4_5_common;
3302 uint32_t dcsr_value;
3303 int retval;
3305 retval = xscale_verify_pointer(CMD_CTX, xscale);
3306 if (retval != ERROR_OK)
3307 return retval;
3309 if (target->state != TARGET_HALTED)
3311 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3312 return ERROR_OK;
3315 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3317 struct xscale_trace_data *td, *next_td;
3318 xscale->trace.buffer_enabled = 1;
3320 /* free old trace data */
3321 td = xscale->trace.data;
3322 while (td)
3324 next_td = td->next;
3326 if (td->entries)
3327 free(td->entries);
3328 free(td);
3329 td = next_td;
3331 xscale->trace.data = NULL;
3333 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3335 xscale->trace.buffer_enabled = 0;
3338 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3340 uint32_t fill = 1;
3341 if (CMD_ARGC >= 3)
3342 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3343 xscale->trace.buffer_fill = fill;
3345 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3347 xscale->trace.buffer_fill = -1;
3350 if (xscale->trace.buffer_enabled)
3352 /* if we enable the trace buffer in fill-once
3353 * mode we know the address of the first instruction */
3354 xscale->trace.pc_ok = 1;
3355 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3357 else
3359 /* otherwise the address is unknown, and we have no known good PC */
3360 xscale->trace.pc_ok = 0;
3363 command_print(CMD_CTX, "trace buffer %s (%s)",
3364 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3365 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3367 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3368 if (xscale->trace.buffer_fill >= 0)
3369 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3370 else
3371 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3373 return ERROR_OK;
3376 COMMAND_HANDLER(xscale_handle_trace_image_command)
3378 struct target *target = get_current_target(CMD_CTX);
3379 struct xscale_common *xscale = target_to_xscale(target);
3380 int retval;
3382 if (CMD_ARGC < 1)
3384 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3385 return ERROR_OK;
3388 retval = xscale_verify_pointer(CMD_CTX, xscale);
3389 if (retval != ERROR_OK)
3390 return retval;
3392 if (xscale->trace.image)
3394 image_close(xscale->trace.image);
3395 free(xscale->trace.image);
3396 command_print(CMD_CTX, "previously loaded image found and closed");
3399 xscale->trace.image = malloc(sizeof(struct image));
3400 xscale->trace.image->base_address_set = 0;
3401 xscale->trace.image->start_address_set = 0;
3403 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3404 if (CMD_ARGC >= 2)
3406 xscale->trace.image->base_address_set = 1;
3407 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3409 else
3411 xscale->trace.image->base_address_set = 0;
3414 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3416 free(xscale->trace.image);
3417 xscale->trace.image = NULL;
3418 return ERROR_OK;
3421 return ERROR_OK;
3424 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3426 struct target *target = get_current_target(CMD_CTX);
3427 struct xscale_common *xscale = target_to_xscale(target);
3428 struct xscale_trace_data *trace_data;
3429 struct fileio file;
3430 int retval;
3432 retval = xscale_verify_pointer(CMD_CTX, xscale);
3433 if (retval != ERROR_OK)
3434 return retval;
3436 if (target->state != TARGET_HALTED)
3438 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3439 return ERROR_OK;
3442 if (CMD_ARGC < 1)
3444 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3445 return ERROR_OK;
3448 trace_data = xscale->trace.data;
3450 if (!trace_data)
3452 command_print(CMD_CTX, "no trace data collected");
3453 return ERROR_OK;
3456 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3458 return ERROR_OK;
3461 while (trace_data)
3463 int i;
3465 fileio_write_u32(&file, trace_data->chkpt0);
3466 fileio_write_u32(&file, trace_data->chkpt1);
3467 fileio_write_u32(&file, trace_data->last_instruction);
3468 fileio_write_u32(&file, trace_data->depth);
3470 for (i = 0; i < trace_data->depth; i++)
3471 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3473 trace_data = trace_data->next;
3476 fileio_close(&file);
3478 return ERROR_OK;
3481 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3483 struct target *target = get_current_target(CMD_CTX);
3484 struct xscale_common *xscale = target_to_xscale(target);
3485 int retval;
3487 retval = xscale_verify_pointer(CMD_CTX, xscale);
3488 if (retval != ERROR_OK)
3489 return retval;
3491 xscale_analyze_trace(target, CMD_CTX);
3493 return ERROR_OK;
3496 COMMAND_HANDLER(xscale_handle_cp15)
3498 struct target *target = get_current_target(CMD_CTX);
3499 struct xscale_common *xscale = target_to_xscale(target);
3500 int retval;
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3506 if (target->state != TARGET_HALTED)
3508 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3509 return ERROR_OK;
3511 uint32_t reg_no = 0;
3512 struct reg *reg = NULL;
3513 if (CMD_ARGC > 0)
3515 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3516 /*translate from xscale cp15 register no to openocd register*/
3517 switch (reg_no)
3519 case 0:
3520 reg_no = XSCALE_MAINID;
3521 break;
3522 case 1:
3523 reg_no = XSCALE_CTRL;
3524 break;
3525 case 2:
3526 reg_no = XSCALE_TTB;
3527 break;
3528 case 3:
3529 reg_no = XSCALE_DAC;
3530 break;
3531 case 5:
3532 reg_no = XSCALE_FSR;
3533 break;
3534 case 6:
3535 reg_no = XSCALE_FAR;
3536 break;
3537 case 13:
3538 reg_no = XSCALE_PID;
3539 break;
3540 case 15:
3541 reg_no = XSCALE_CPACCESS;
3542 break;
3543 default:
3544 command_print(CMD_CTX, "invalid register number");
3545 return ERROR_INVALID_ARGUMENTS;
3547 reg = &xscale->reg_cache->reg_list[reg_no];
3550 if (CMD_ARGC == 1)
3552 uint32_t value;
3554 /* read cp15 control register */
3555 xscale_get_reg(reg);
3556 value = buf_get_u32(reg->value, 0, 32);
3557 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3559 else if (CMD_ARGC == 2)
3561 uint32_t value;
3562 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3564 /* send CP write request (command 0x41) */
3565 xscale_send_u32(target, 0x41);
3567 /* send CP register number */
3568 xscale_send_u32(target, reg_no);
3570 /* send CP register value */
3571 xscale_send_u32(target, value);
3573 /* execute cpwait to ensure outstanding operations complete */
3574 xscale_send_u32(target, 0x53);
3576 else
3578 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3581 return ERROR_OK;
3584 static const struct command_registration xscale_exec_command_handlers[] = {
3586 .name = "cache_info",
3587 .handler = &xscale_handle_cache_info_command,
3588 .mode = COMMAND_EXEC,
3592 .name = "mmu",
3593 .handler = &xscale_handle_mmu_command,
3594 .mode = COMMAND_EXEC,
3595 .usage = "[enable|disable]",
3596 .help = "enable or disable the MMU",
3599 .name = "icache",
3600 .handler = &xscale_handle_idcache_command,
3601 .mode = COMMAND_EXEC,
3602 .usage = "[enable|disable]",
3603 .help = "enable or disable the ICache",
3606 .name = "dcache",
3607 .handler = &xscale_handle_idcache_command,
3608 .mode = COMMAND_EXEC,
3609 .usage = "[enable|disable]",
3610 .help = "enable or disable the DCache",
3614 .name = "vector_catch",
3615 .handler = &xscale_handle_vector_catch_command,
3616 .mode = COMMAND_EXEC,
3617 .help = "mask of vectors that should be caught",
3618 .usage = "[<mask>]",
3621 .name = "vector_table",
3622 .handler = &xscale_handle_vector_table_command,
3623 .mode = COMMAND_EXEC,
3624 .usage = "<high|low> <index> <code>",
3625 .help = "set static code for exception handler entry",
3629 .name = "trace_buffer",
3630 .handler = &xscale_handle_trace_buffer_command,
3631 .mode = COMMAND_EXEC,
3632 .usage = "<enable | disable> [fill [n]|wrap]",
3635 .name = "dump_trace",
3636 .handler = &xscale_handle_dump_trace_command,
3637 .mode = COMMAND_EXEC,
3638 .help = "dump content of trace buffer to <file>",
3639 .usage = "<file>",
3642 .name = "analyze_trace",
3643 .handler = &xscale_handle_analyze_trace_buffer_command,
3644 .mode = COMMAND_EXEC,
3645 .help = "analyze content of trace buffer",
3648 .name = "trace_image",
3649 .handler = &xscale_handle_trace_image_command,
3650 .mode = COMMAND_EXEC,
3651 .help = "load image from <file> [base address]",
3652 .usage = "<file> [address] [type]",
3656 .name = "cp15",
3657 .handler = &xscale_handle_cp15,
3658 .mode = COMMAND_EXEC,
3659 .help = "access coproc 15",
3660 .usage = "<register> [value]",
3662 COMMAND_REGISTRATION_DONE
3664 static const struct command_registration xscale_any_command_handlers[] = {
3666 .name = "debug_handler",
3667 .handler = &xscale_handle_debug_handler_command,
3668 .mode = COMMAND_ANY,
3669 .usage = "<target#> <address>",
3672 .name = "cache_clean_address",
3673 .handler = &xscale_handle_cache_clean_address_command,
3674 .mode = COMMAND_ANY,
3677 .chain = xscale_exec_command_handlers,
3679 COMMAND_REGISTRATION_DONE
3681 static const struct command_registration xscale_command_handlers[] = {
3683 .chain = arm_command_handlers,
3686 .name = "xscale",
3687 .mode = COMMAND_ANY,
3688 .help = "xscale command group",
3689 .chain = xscale_any_command_handlers,
3691 COMMAND_REGISTRATION_DONE
3694 struct target_type xscale_target =
3696 .name = "xscale",
3698 .poll = xscale_poll,
3699 .arch_state = xscale_arch_state,
3701 .target_request_data = NULL,
3703 .halt = xscale_halt,
3704 .resume = xscale_resume,
3705 .step = xscale_step,
3707 .assert_reset = xscale_assert_reset,
3708 .deassert_reset = xscale_deassert_reset,
3709 .soft_reset_halt = NULL,
3711 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3713 .read_memory = xscale_read_memory,
3714 .read_phys_memory = xscale_read_phys_memory,
3715 .write_memory = xscale_write_memory,
3716 .write_phys_memory = xscale_write_phys_memory,
3717 .bulk_write_memory = xscale_bulk_write_memory,
3719 .checksum_memory = arm_checksum_memory,
3720 .blank_check_memory = arm_blank_check_memory,
3722 .run_algorithm = armv4_5_run_algorithm,
3724 .add_breakpoint = xscale_add_breakpoint,
3725 .remove_breakpoint = xscale_remove_breakpoint,
3726 .add_watchpoint = xscale_add_watchpoint,
3727 .remove_watchpoint = xscale_remove_watchpoint,
3729 .commands = xscale_command_handlers,
3730 .target_create = xscale_target_create,
3731 .init_target = xscale_init_target,
3733 .virt2phys = xscale_virt2phys,
3734 .mmu = xscale_mmu