fix xscale icache and dcache commands
[openocd/genbsdl.git] / src / target / xscale.c
blob 50c9595006e9336f08bb30ddd9c57cb91c86ce0a
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
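/* Rough summary of the debug-handler command bytes used below, inferred
 * from the call sites and comments in this file (not from a separate spec):
 *   0x00          request banked registers from the handler
 *   0x01          send banked registers to the handler
 *   0x1n / 0x2n   memory read / write, n = access size (1, 2 or 4)
 *   0x30 / 0x31   resume (0x31 when the trace buffer is enabled)
 *   0x50 / 0x51   clean (followed by the cache-clean address) / invalidate DCache
 *   0x52          invalidate ICache
 *   0x53          CPWAIT, to let outstanding CP15 operations complete
 *   0x60          clear the DCSR Sticky Abort (SA) bit
 *   0x62          clean the trace buffer before resuming with trace enabled
 *
 * For example, xscale_read_memory() below performs a read of <count>
 * 32-bit words roughly as:
 *   xscale_send_u32(target, 0x10 | 4);      // read request, 4-byte accesses
 *   xscale_send_u32(target, address);       // base address
 *   xscale_send_u32(target, count);         // number of elements
 *   xscale_receive(target, buf32, count);   // collect the results
 */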
87 static char *const xscale_reg_list[] =
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
113 static const struct xscale_reg xscale_reg_arch_info[] =
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
142 uint8_t buf[4];
144 buf_set_u32(buf, 0, 32, value);
146 return xscale_set_reg(reg, buf);
149 static const char xscale_not[] = "target is not an XScale";
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
158 return ERROR_OK;
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
163 if (tap == NULL)
164 return ERROR_FAIL;
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
168 struct scan_field field;
169 uint8_t scratch[4];
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176 jtag_add_ir_scan(tap, &field, jtag_get_end_state());
179 return ERROR_OK;
182 static int xscale_read_dcsr(struct target *target)
184 struct xscale_common *xscale = target_to_xscale(target);
185 int retval;
186 struct scan_field fields[3];
187 uint8_t field0 = 0x0;
188 uint8_t field0_check_value = 0x2;
189 uint8_t field0_check_mask = 0x7;
190 uint8_t field2 = 0x0;
191 uint8_t field2_check_value = 0x0;
192 uint8_t field2_check_mask = 0x1;
194 jtag_set_end_state(TAP_DRPAUSE);
195 xscale_jtag_set_instr(target->tap,
196 XSCALE_SELDCSR << xscale->xscale_variant);
198 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
199 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
201 memset(&fields, 0, sizeof fields);
203 fields[0].num_bits = 3;
204 fields[0].out_value = &field0;
205 uint8_t tmp;
206 fields[0].in_value = &tmp;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211 fields[2].num_bits = 1;
212 fields[2].out_value = &field2;
213 uint8_t tmp2;
214 fields[2].in_value = &tmp2;
216 jtag_add_dr_scan(target->tap, 3, fields, jtag_get_end_state());
218 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
219 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 LOG_ERROR("JTAG error while reading DCSR");
224 return retval;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
228 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230 /* write the register with the value we just read
231 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
233 field0_check_mask = 0x1;
234 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
235 fields[1].in_value = NULL;
237 jtag_set_end_state(TAP_IDLE);
239 jtag_add_dr_scan(target->tap, 3, fields, jtag_get_end_state());
241 /* DANGER!!! this must be here. It will make sure that the arguments
242 * to jtag_set_check_value() do not go out of scope! */
243 return jtag_execute_queue();
247 static void xscale_getbuf(jtag_callback_data_t arg)
249 uint8_t *in = (uint8_t *)arg;
250 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
253 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 if (num_words == 0)
256 return ERROR_INVALID_ARGUMENTS;
258 struct xscale_common *xscale = target_to_xscale(target);
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
276 memset(&fields, 0, sizeof fields);
278 fields[0].num_bits = 3;
279 fields[0].check_value = &field0_check_value;
280 fields[0].check_mask = &field0_check_mask;
282 fields[1].num_bits = 32;
284 fields[2].num_bits = 1;
285 fields[2].check_value = &field2_check_value;
286 fields[2].check_mask = &field2_check_mask;
288 jtag_set_end_state(TAP_IDLE);
289 xscale_jtag_set_instr(target->tap,
290 XSCALE_DBGTX << xscale->xscale_variant);
291 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
293 /* repeat until all words have been collected */
294 int attempts = 0;
295 while (words_done < num_words)
297 /* schedule reads */
298 words_scheduled = 0;
299 for (i = words_done; i < num_words; i++)
301 fields[0].in_value = &field0[i];
303 jtag_add_pathmove(3, path);
305 fields[1].in_value = (uint8_t *)(field1 + i);
307 jtag_add_dr_scan_check(target->tap, 3, fields, jtag_set_end_state(TAP_IDLE));
309 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
311 words_scheduled++;
314 if ((retval = jtag_execute_queue()) != ERROR_OK)
316 LOG_ERROR("JTAG error while receiving data from debug handler");
317 break;
320 /* examine results */
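/* (bit 0 of the 3-bit status field tells whether the TX register held
 * valid data when it was sampled, as in xscale_read_tx() below; words
 * read while it was clear are dropped here and rescheduled next pass) */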
321 for (i = words_done; i < num_words; i++)
323 if (!(field0[i] & 1))
325 /* move backwards if necessary */
326 int j;
327 for (j = i; j < num_words - 1; j++)
329 field0[j] = field0[j + 1];
330 field1[j] = field1[j + 1];
332 words_scheduled--;
335 if (words_scheduled == 0)
337 if (attempts++ == 1000)
339 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
340 retval = ERROR_TARGET_TIMEOUT;
341 break;
345 words_done += words_scheduled;
348 for (i = 0; i < num_words; i++)
349 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
free(field0);
351 free(field1);
353 return retval;
356 static int xscale_read_tx(struct target *target, int consume)
358 struct xscale_common *xscale = target_to_xscale(target);
359 tap_state_t path[3];
360 tap_state_t noconsume_path[6];
361 int retval;
362 struct timeval timeout, now;
363 struct scan_field fields[3];
364 uint8_t field0_in = 0x0;
365 uint8_t field0_check_value = 0x2;
366 uint8_t field0_check_mask = 0x6;
367 uint8_t field2_check_value = 0x0;
368 uint8_t field2_check_mask = 0x1;
370 jtag_set_end_state(TAP_IDLE);
372 xscale_jtag_set_instr(target->tap,
373 XSCALE_DBGTX << xscale->xscale_variant);
375 path[0] = TAP_DRSELECT;
376 path[1] = TAP_DRCAPTURE;
377 path[2] = TAP_DRSHIFT;
379 noconsume_path[0] = TAP_DRSELECT;
380 noconsume_path[1] = TAP_DRCAPTURE;
381 noconsume_path[2] = TAP_DREXIT1;
382 noconsume_path[3] = TAP_DRPAUSE;
383 noconsume_path[4] = TAP_DREXIT2;
384 noconsume_path[5] = TAP_DRSHIFT;
386 memset(&fields, 0, sizeof fields);
388 fields[0].num_bits = 3;
389 fields[0].in_value = &field0_in;
391 fields[1].num_bits = 32;
392 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
394 fields[2].num_bits = 1;
395 uint8_t tmp;
396 fields[2].in_value = &tmp;
398 gettimeofday(&timeout, NULL);
399 timeval_add_time(&timeout, 1, 0);
401 for (;;)
403 /* if we want to consume the register content (i.e. clear TX_READY),
404 * we have to go straight from Capture-DR to Shift-DR
405 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
407 if (consume)
408 jtag_add_pathmove(3, path);
409 else
411 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
414 jtag_add_dr_scan(target->tap, 3, fields, jtag_set_end_state(TAP_IDLE));
416 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
417 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
419 if ((retval = jtag_execute_queue()) != ERROR_OK)
421 LOG_ERROR("JTAG error while reading TX");
422 return ERROR_TARGET_TIMEOUT;
425 gettimeofday(&now, NULL);
426 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
428 LOG_ERROR("time out reading TX register");
429 return ERROR_TARGET_TIMEOUT;
431 if (!((!(field0_in & 1)) && consume))
433 goto done;
435 if (debug_level >= 3)
437 LOG_DEBUG("waiting 100ms");
438 alive_sleep(100); /* avoid flooding the logs */
439 } else
441 keep_alive();
444 done:
446 if (!(field0_in & 1))
447 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
449 return ERROR_OK;
452 static int xscale_write_rx(struct target *target)
454 struct xscale_common *xscale = target_to_xscale(target);
455 int retval;
456 struct timeval timeout, now;
457 struct scan_field fields[3];
458 uint8_t field0_out = 0x0;
459 uint8_t field0_in = 0x0;
460 uint8_t field0_check_value = 0x2;
461 uint8_t field0_check_mask = 0x6;
462 uint8_t field2 = 0x0;
463 uint8_t field2_check_value = 0x0;
464 uint8_t field2_check_mask = 0x1;
466 jtag_set_end_state(TAP_IDLE);
468 xscale_jtag_set_instr(target->tap,
469 XSCALE_DBGRX << xscale->xscale_variant);
471 memset(&fields, 0, sizeof fields);
473 fields[0].num_bits = 3;
474 fields[0].out_value = &field0_out;
475 fields[0].in_value = &field0_in;
477 fields[1].num_bits = 32;
478 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
480 fields[2].num_bits = 1;
481 fields[2].out_value = &field2;
482 uint8_t tmp;
483 fields[2].in_value = &tmp;
485 gettimeofday(&timeout, NULL);
486 timeval_add_time(&timeout, 1, 0);
488 /* poll until rx_read is low */
489 LOG_DEBUG("polling RX");
490 for (;;)
492 jtag_add_dr_scan(target->tap, 3, fields, jtag_set_end_state(TAP_IDLE));
494 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
495 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
497 if ((retval = jtag_execute_queue()) != ERROR_OK)
499 LOG_ERROR("JTAG error while writing RX");
500 return retval;
503 gettimeofday(&now, NULL);
504 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
506 LOG_ERROR("time out writing RX register");
507 return ERROR_TARGET_TIMEOUT;
509 if (!(field0_in & 1))
510 goto done;
511 if (debug_level >= 3)
513 LOG_DEBUG("waiting 100ms");
514 alive_sleep(100); /* avoid flooding the logs */
515 } else
517 keep_alive();
520 done:
522 /* set rx_valid */
523 field2 = 0x1;
524 jtag_add_dr_scan(target->tap, 3, fields, jtag_set_end_state(TAP_IDLE));
526 if ((retval = jtag_execute_queue()) != ERROR_OK)
528 LOG_ERROR("JTAG error while writing RX");
529 return retval;
532 return ERROR_OK;
535 /* send count elements of size byte to the debug handler */
536 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
538 struct xscale_common *xscale = target_to_xscale(target);
539 uint32_t t[3];
540 int bits[3];
541 int retval;
542 int done_count = 0;
544 jtag_set_end_state(TAP_IDLE);
546 xscale_jtag_set_instr(target->tap,
547 XSCALE_DBGRX << xscale->xscale_variant);
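/* the three DR fields mirror those used in xscale_write_rx() above:
 * a 3-bit handshake field (driven as 0 here), the 32-bit data word,
 * and a trailing 1-bit "RX valid" flag held at 1 for every word */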
549 bits[0] = 3;
550 t[0] = 0;
551 bits[1] = 32;
552 t[2] = 1;
553 bits[2] = 1;
554 int endianness = target->endianness;
555 while (done_count++ < count)
557 switch (size)
559 case 4:
560 if (endianness == TARGET_LITTLE_ENDIAN)
562 t[1] = le_to_h_u32(buffer);
563 } else
565 t[1] = be_to_h_u32(buffer);
567 break;
568 case 2:
569 if (endianness == TARGET_LITTLE_ENDIAN)
571 t[1] = le_to_h_u16(buffer);
572 } else
574 t[1] = be_to_h_u16(buffer);
576 break;
577 case 1:
578 t[1] = buffer[0];
579 break;
580 default:
581 LOG_ERROR("BUG: size neither 4, 2 nor 1");
582 return ERROR_INVALID_ARGUMENTS;
584 jtag_add_dr_out(target->tap,
586 bits,
588 jtag_set_end_state(TAP_IDLE));
589 buffer += size;
592 if ((retval = jtag_execute_queue()) != ERROR_OK)
594 LOG_ERROR("JTAG error while sending data to debug handler");
595 return retval;
598 return ERROR_OK;
601 static int xscale_send_u32(struct target *target, uint32_t value)
603 struct xscale_common *xscale = target_to_xscale(target);
605 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
606 return xscale_write_rx(target);
609 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
611 struct xscale_common *xscale = target_to_xscale(target);
612 int retval;
613 struct scan_field fields[3];
614 uint8_t field0 = 0x0;
615 uint8_t field0_check_value = 0x2;
616 uint8_t field0_check_mask = 0x7;
617 uint8_t field2 = 0x0;
618 uint8_t field2_check_value = 0x0;
619 uint8_t field2_check_mask = 0x1;
621 if (hold_rst != -1)
622 xscale->hold_rst = hold_rst;
624 if (ext_dbg_brk != -1)
625 xscale->external_debug_break = ext_dbg_brk;
627 jtag_set_end_state(TAP_IDLE);
628 xscale_jtag_set_instr(target->tap,
629 XSCALE_SELDCSR << xscale->xscale_variant);
631 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
632 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
634 memset(&fields, 0, sizeof fields);
636 fields[0].num_bits = 3;
637 fields[0].out_value = &field0;
638 uint8_t tmp;
639 fields[0].in_value = &tmp;
641 fields[1].num_bits = 32;
642 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
644 fields[2].num_bits = 1;
645 fields[2].out_value = &field2;
646 uint8_t tmp2;
647 fields[2].in_value = &tmp2;
649 jtag_add_dr_scan(target->tap, 3, fields, jtag_get_end_state());
651 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
652 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
654 if ((retval = jtag_execute_queue()) != ERROR_OK)
656 LOG_ERROR("JTAG error while writing DCSR");
657 return retval;
660 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
661 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
663 return ERROR_OK;
666 /* parity of the number of set bits: 0 if even, 1 if odd (for 32-bit words) */
667 static unsigned int parity (unsigned int v)
669 // unsigned int ov = v;
670 v ^= v >> 16;
671 v ^= v >> 8;
672 v ^= v >> 4;
673 v &= 0xf;
674 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
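/* 0x6996 is a 16-entry parity table packed into one word: after the
 * folding above, v is a 4-bit index whose table bit gives the parity */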
675 return (0x6996 >> v) & 1;
678 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
680 struct xscale_common *xscale = target_to_xscale(target);
681 uint8_t packet[4];
682 uint8_t cmd;
683 int word;
684 struct scan_field fields[2];
686 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
688 /* LDIC into IR */
689 jtag_set_end_state(TAP_IDLE);
690 xscale_jtag_set_instr(target->tap,
691 XSCALE_LDIC << xscale->xscale_variant);
693 /* CMD is b011 to load a cacheline into the Mini ICache.
694 * Loading into the main ICache is deprecated, and unused.
695 * It's followed by three zero bits, and 27 address bits.
697 buf_set_u32(&cmd, 0, 6, 0x3);
699 /* virtual address of desired cache line */
700 buf_set_u32(packet, 0, 27, va >> 5);
702 memset(&fields, 0, sizeof fields);
704 fields[0].num_bits = 6;
705 fields[0].out_value = &cmd;
707 fields[1].num_bits = 27;
708 fields[1].out_value = packet;
710 jtag_add_dr_scan(target->tap, 2, fields, jtag_get_end_state());
712 /* rest of packet is a cacheline: 8 instructions, with parity */
713 fields[0].num_bits = 32;
714 fields[0].out_value = packet;
716 fields[1].num_bits = 1;
717 fields[1].out_value = &cmd;
719 for (word = 0; word < 8; word++)
721 buf_set_u32(packet, 0, 32, buffer[word]);
723 uint32_t value;
724 memcpy(&value, packet, sizeof(uint32_t));
725 cmd = parity(value);
727 jtag_add_dr_scan(target->tap, 2, fields, jtag_get_end_state());
730 return jtag_execute_queue();
733 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
735 struct xscale_common *xscale = target_to_xscale(target);
736 uint8_t packet[4];
737 uint8_t cmd;
738 struct scan_field fields[2];
740 jtag_set_end_state(TAP_IDLE);
741 xscale_jtag_set_instr(target->tap,
742 XSCALE_LDIC << xscale->xscale_variant);
744 /* CMD for invalidate IC line b000, bits [6:4] b000 */
745 buf_set_u32(&cmd, 0, 6, 0x0);
747 /* virtual address of desired cache line */
748 buf_set_u32(packet, 0, 27, va >> 5);
750 memset(&fields, 0, sizeof fields);
752 fields[0].num_bits = 6;
753 fields[0].out_value = &cmd;
755 fields[1].num_bits = 27;
756 fields[1].out_value = packet;
758 jtag_add_dr_scan(target->tap, 2, fields, jtag_get_end_state());
760 return ERROR_OK;
763 static int xscale_update_vectors(struct target *target)
765 struct xscale_common *xscale = target_to_xscale(target);
766 int i;
767 int retval;
769 uint32_t low_reset_branch, high_reset_branch;
771 for (i = 1; i < 8; i++)
773 /* if there's a static vector specified for this exception, override */
774 if (xscale->static_high_vectors_set & (1 << i))
776 xscale->high_vectors[i] = xscale->static_high_vectors[i];
778 else
780 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
781 if (retval == ERROR_TARGET_TIMEOUT)
782 return retval;
783 if (retval != ERROR_OK)
785 /* Some of these reads will fail as part of normal execution */
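/* ARMV4_5_B(0xfffffe, 0) is a branch-to-self: the -2 word offset
 * cancels the +8 pipeline offset, so an unreadable vector simply
 * spins (the same placeholder is used for the low vectors below) */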
786 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
791 for (i = 1; i < 8; i++)
793 if (xscale->static_low_vectors_set & (1 << i))
795 xscale->low_vectors[i] = xscale->static_low_vectors[i];
797 else
799 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
800 if (retval == ERROR_TARGET_TIMEOUT)
801 return retval;
802 if (retval != ERROR_OK)
804 /* Some of these reads will fail as part of normal execution */
805 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
810 /* calculate branches to debug handler */
811 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
812 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
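/* ARM B offsets are relative to the fetch address plus 8 (pipeline)
 * and counted in words, hence the "- 0x8" and ">> 2"; the result is
 * masked to the 24-bit offset field of the branch instruction below */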
814 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
815 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
817 /* invalidate and load exception vectors in mini i-cache */
818 xscale_invalidate_ic_line(target, 0x0);
819 xscale_invalidate_ic_line(target, 0xffff0000);
821 xscale_load_ic(target, 0x0, xscale->low_vectors);
822 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
824 return ERROR_OK;
827 static int xscale_arch_state(struct target *target)
829 struct xscale_common *xscale = target_to_xscale(target);
830 struct arm *armv4_5 = &xscale->armv4_5_common;
832 static const char *state[] =
834 "disabled", "enabled"
837 static const char *arch_dbg_reason[] =
839 "", "\n(processor reset)", "\n(trace buffer full)"
842 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
844 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
845 return ERROR_INVALID_ARGUMENTS;
848 arm_arch_state(target);
849 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
850 state[xscale->armv4_5_mmu.mmu_enabled],
851 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
852 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
853 arch_dbg_reason[xscale->arch_debug_reason]);
855 return ERROR_OK;
858 static int xscale_poll(struct target *target)
860 int retval = ERROR_OK;
862 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
864 enum target_state previous_state = target->state;
865 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
868 /* there's data to read from the tx register, we entered debug state */
869 target->state = TARGET_HALTED;
871 /* process debug entry, fetching current mode regs */
872 retval = xscale_debug_entry(target);
874 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
876 LOG_USER("error while polling TX register, reset CPU");
877 /* here we "lie" so GDB won't get stuck and a reset can be performed */
878 target->state = TARGET_HALTED;
881 /* debug_entry could have overwritten target state (i.e. immediate resume)
882 * don't signal event handlers in that case
884 if (target->state != TARGET_HALTED)
885 return ERROR_OK;
887 /* if target was running, signal that we halted
888 * otherwise we reentered from debug execution */
889 if (previous_state == TARGET_RUNNING)
890 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
891 else
892 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
895 return retval;
898 static int xscale_debug_entry(struct target *target)
900 struct xscale_common *xscale = target_to_xscale(target);
901 struct arm *armv4_5 = &xscale->armv4_5_common;
902 uint32_t pc;
903 uint32_t buffer[10];
904 int i;
905 int retval;
906 uint32_t moe;
908 /* clear external dbg break (will be written on next DCSR read) */
909 xscale->external_debug_break = 0;
910 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
911 return retval;
913 /* get r0, pc, r1 to r7 and cpsr */
914 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
915 return retval;
917 /* move r0 from buffer to register cache */
918 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
919 armv4_5->core_cache->reg_list[0].dirty = 1;
920 armv4_5->core_cache->reg_list[0].valid = 1;
921 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
923 /* move pc from buffer to register cache */
924 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
925 armv4_5->pc->dirty = 1;
926 armv4_5->pc->valid = 1;
927 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
929 /* move data from buffer to register cache */
930 for (i = 1; i <= 7; i++)
932 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
933 armv4_5->core_cache->reg_list[i].dirty = 1;
934 armv4_5->core_cache->reg_list[i].valid = 1;
935 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
938 arm_set_cpsr(armv4_5, buffer[9]);
939 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
941 if (!is_arm_mode(armv4_5->core_mode))
943 target->state = TARGET_UNKNOWN;
944 LOG_ERROR("cpsr contains invalid mode value - communication failure");
945 return ERROR_TARGET_FAILURE;
947 LOG_DEBUG("target entered debug state in %s mode",
948 arm_mode_name(armv4_5->core_mode));
950 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
951 if (armv4_5->spsr) {
952 xscale_receive(target, buffer, 8);
953 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
954 armv4_5->spsr->dirty = false;
955 armv4_5->spsr->valid = true;
957 else
959 /* r8 to r14, but no spsr */
960 xscale_receive(target, buffer, 7);
963 /* move data from buffer to right banked register in cache */
964 for (i = 8; i <= 14; i++)
966 struct reg *r = arm_reg_current(armv4_5, i);
968 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
969 r->dirty = false;
970 r->valid = true;
973 /* examine debug reason */
974 xscale_read_dcsr(target);
975 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
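/* DCSR bits [4:2] hold the method-of-entry (MOE) code decoded in the switch below */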
977 /* stored PC (for calculating fixup) */
978 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
980 switch (moe)
982 case 0x0: /* Processor reset */
983 target->debug_reason = DBG_REASON_DBGRQ;
984 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
985 pc -= 4;
986 break;
987 case 0x1: /* Instruction breakpoint hit */
988 target->debug_reason = DBG_REASON_BREAKPOINT;
989 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
990 pc -= 4;
991 break;
992 case 0x2: /* Data breakpoint hit */
993 target->debug_reason = DBG_REASON_WATCHPOINT;
994 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
995 pc -= 4;
996 break;
997 case 0x3: /* BKPT instruction executed */
998 target->debug_reason = DBG_REASON_BREAKPOINT;
999 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1000 pc -= 4;
1001 break;
1002 case 0x4: /* Ext. debug event */
1003 target->debug_reason = DBG_REASON_DBGRQ;
1004 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1005 pc -= 4;
1006 break;
1007 case 0x5: /* Vector trap occurred */
1008 target->debug_reason = DBG_REASON_BREAKPOINT;
1009 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1010 pc -= 4;
1011 break;
1012 case 0x6: /* Trace buffer full break */
1013 target->debug_reason = DBG_REASON_DBGRQ;
1014 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1015 pc -= 4;
1016 break;
1017 case 0x7: /* Reserved (may flag Hot-Debug support) */
1018 default:
1019 LOG_ERROR("Method of Entry is 'Reserved'");
1020 exit(-1);
1021 break;
1024 /* apply PC fixup */
1025 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1027 /* on the first debug entry, identify cache type */
1028 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1030 uint32_t cache_type_reg;
1032 /* read cp15 cache type register */
1033 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1034 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1036 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1039 /* examine MMU and Cache settings */
1040 /* read cp15 control register */
1041 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1042 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1043 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1044 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1045 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1047 /* tracing enabled, read collected trace data */
1048 if (xscale->trace.buffer_enabled)
1050 xscale_read_trace(target);
1051 xscale->trace.buffer_fill--;
1053 /* resume if we're still collecting trace data */
1054 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1055 && (xscale->trace.buffer_fill > 0))
1057 xscale_resume(target, 1, 0x0, 1, 0);
1059 else
1061 xscale->trace.buffer_enabled = 0;
1065 return ERROR_OK;
1068 static int xscale_halt(struct target *target)
1070 struct xscale_common *xscale = target_to_xscale(target);
1072 LOG_DEBUG("target->state: %s",
1073 target_state_name(target));
1075 if (target->state == TARGET_HALTED)
1077 LOG_DEBUG("target was already halted");
1078 return ERROR_OK;
1080 else if (target->state == TARGET_UNKNOWN)
1082 /* this must not happen for an XScale target */
1083 LOG_ERROR("target was in unknown state when halt was requested");
1084 return ERROR_TARGET_INVALID;
1086 else if (target->state == TARGET_RESET)
1088 LOG_DEBUG("target->state == TARGET_RESET");
1090 else
1092 /* assert external dbg break */
1093 xscale->external_debug_break = 1;
1094 xscale_read_dcsr(target);
1096 target->debug_reason = DBG_REASON_DBGRQ;
1099 return ERROR_OK;
1102 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1104 struct xscale_common *xscale = target_to_xscale(target);
1105 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1106 int retval;
1108 if (xscale->ibcr0_used)
1110 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1112 if (ibcr0_bp)
1114 xscale_unset_breakpoint(target, ibcr0_bp);
1116 else
1118 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1119 exit(-1);
1123 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1124 return retval;
1126 return ERROR_OK;
1129 static int xscale_disable_single_step(struct target *target)
1131 struct xscale_common *xscale = target_to_xscale(target);
1132 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1133 int retval;
1135 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1136 return retval;
1138 return ERROR_OK;
1141 static void xscale_enable_watchpoints(struct target *target)
1143 struct watchpoint *watchpoint = target->watchpoints;
1145 while (watchpoint)
1147 if (watchpoint->set == 0)
1148 xscale_set_watchpoint(target, watchpoint);
1149 watchpoint = watchpoint->next;
1153 static void xscale_enable_breakpoints(struct target *target)
1155 struct breakpoint *breakpoint = target->breakpoints;
1157 /* set any pending breakpoints */
1158 while (breakpoint)
1160 if (breakpoint->set == 0)
1161 xscale_set_breakpoint(target, breakpoint);
1162 breakpoint = breakpoint->next;
1166 static int xscale_resume(struct target *target, int current,
1167 uint32_t address, int handle_breakpoints, int debug_execution)
1169 struct xscale_common *xscale = target_to_xscale(target);
1170 struct arm *armv4_5 = &xscale->armv4_5_common;
1171 struct breakpoint *breakpoint = target->breakpoints;
1172 uint32_t current_pc;
1173 int retval;
1174 int i;
1176 LOG_DEBUG("-");
1178 if (target->state != TARGET_HALTED)
1180 LOG_WARNING("target not halted");
1181 return ERROR_TARGET_NOT_HALTED;
1184 if (!debug_execution)
1186 target_free_all_working_areas(target);
1189 /* update vector tables */
1190 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1191 return retval;
1193 /* current = 1: continue on current pc, otherwise continue at <address> */
1194 if (!current)
1195 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1197 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1199 /* if we're at the reset vector, we have to simulate the branch */
1200 if (current_pc == 0x0)
1202 arm_simulate_step(target, NULL);
1203 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1206 /* the front-end may request us not to handle breakpoints */
1207 if (handle_breakpoints)
1209 breakpoint = breakpoint_find(target,
1210 buf_get_u32(armv4_5->pc->value, 0, 32));
1211 if (breakpoint != NULL)
1213 uint32_t next_pc;
1215 /* there's a breakpoint at the current PC, we have to step over it */
1216 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1217 xscale_unset_breakpoint(target, breakpoint);
1219 /* calculate PC of next instruction */
1220 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1222 uint32_t current_opcode;
1223 target_read_u32(target, current_pc, &current_opcode);
1224 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1227 LOG_DEBUG("enable single-step");
1228 xscale_enable_single_step(target, next_pc);
1230 /* restore banked registers */
1231 retval = xscale_restore_banked(target);
1233 /* send resume request (command 0x30 or 0x31)
1234 * clean the trace buffer if it is to be enabled (0x62) */
1235 if (xscale->trace.buffer_enabled)
1237 xscale_send_u32(target, 0x62);
1238 xscale_send_u32(target, 0x31);
1240 else
1241 xscale_send_u32(target, 0x30);
1243 /* send CPSR */
1244 xscale_send_u32(target,
1245 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1246 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1247 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1249 for (i = 7; i >= 0; i--)
1251 /* send register */
1252 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1253 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1256 /* send PC */
1257 xscale_send_u32(target,
1258 buf_get_u32(armv4_5->pc->value, 0, 32));
1259 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1260 buf_get_u32(armv4_5->pc->value, 0, 32));
1262 /* wait for and process debug entry */
1263 xscale_debug_entry(target);
1265 LOG_DEBUG("disable single-step");
1266 xscale_disable_single_step(target);
1268 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1269 xscale_set_breakpoint(target, breakpoint);
1273 /* enable any pending breakpoints and watchpoints */
1274 xscale_enable_breakpoints(target);
1275 xscale_enable_watchpoints(target);
1277 /* restore banked registers */
1278 retval = xscale_restore_banked(target);
1280 /* send resume request (command 0x30 or 0x31)
1281 * clean the trace buffer if it is to be enabled (0x62) */
1282 if (xscale->trace.buffer_enabled)
1284 xscale_send_u32(target, 0x62);
1285 xscale_send_u32(target, 0x31);
1287 else
1288 xscale_send_u32(target, 0x30);
1290 /* send CPSR */
1291 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1292 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1293 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1295 for (i = 7; i >= 0; i--)
1297 /* send register */
1298 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1299 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1302 /* send PC */
1303 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1304 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1305 buf_get_u32(armv4_5->pc->value, 0, 32));
1307 target->debug_reason = DBG_REASON_NOTHALTED;
1309 if (!debug_execution)
1311 /* registers are now invalid */
1312 register_cache_invalidate(armv4_5->core_cache);
1313 target->state = TARGET_RUNNING;
1314 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1316 else
1318 target->state = TARGET_DEBUG_RUNNING;
1319 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1322 LOG_DEBUG("target resumed");
1324 return ERROR_OK;
1327 static int xscale_step_inner(struct target *target, int current,
1328 uint32_t address, int handle_breakpoints)
1330 struct xscale_common *xscale = target_to_xscale(target);
1331 struct arm *armv4_5 = &xscale->armv4_5_common;
1332 uint32_t next_pc;
1333 int retval;
1334 int i;
1336 target->debug_reason = DBG_REASON_SINGLESTEP;
1338 /* calculate PC of next instruction */
1339 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1341 uint32_t current_opcode, current_pc;
1342 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1344 target_read_u32(target, current_pc, &current_opcode);
1345 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1346 return retval;
1349 LOG_DEBUG("enable single-step");
1350 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1351 return retval;
1353 /* restore banked registers */
1354 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1355 return retval;
1357 /* send resume request (command 0x30 or 0x31)
1358 * clean the trace buffer if it is to be enabled (0x62) */
1359 if (xscale->trace.buffer_enabled)
1361 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1362 return retval;
1363 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1364 return retval;
1366 else
1367 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1368 return retval;
1370 /* send CPSR */
1371 retval = xscale_send_u32(target,
1372 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1373 if (retval != ERROR_OK)
1374 return retval;
1375 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1376 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1378 for (i = 7; i >= 0; i--)
1380 /* send register */
1381 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1382 return retval;
1383 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1386 /* send PC */
1387 retval = xscale_send_u32(target,
1388 buf_get_u32(armv4_5->pc->value, 0, 32));
1389 if (retval != ERROR_OK)
1390 return retval;
1391 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1392 buf_get_u32(armv4_5->pc->value, 0, 32));
1394 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1396 /* registers are now invalid */
1397 register_cache_invalidate(armv4_5->core_cache);
1399 /* wait for and process debug entry */
1400 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1401 return retval;
1403 LOG_DEBUG("disable single-step");
1404 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1405 return retval;
1407 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1409 return ERROR_OK;
1412 static int xscale_step(struct target *target, int current,
1413 uint32_t address, int handle_breakpoints)
1415 struct arm *armv4_5 = target_to_arm(target);
1416 struct breakpoint *breakpoint = NULL;
1418 uint32_t current_pc;
1419 int retval;
1421 if (target->state != TARGET_HALTED)
1423 LOG_WARNING("target not halted");
1424 return ERROR_TARGET_NOT_HALTED;
1427 /* current = 1: continue on current pc, otherwise continue at <address> */
1428 if (!current)
1429 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1431 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1433 /* if we're at the reset vector, we have to simulate the step */
1434 if (current_pc == 0x0)
1436 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1437 return retval;
1438 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1440 target->debug_reason = DBG_REASON_SINGLESTEP;
1441 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1443 return ERROR_OK;
1446 /* the front-end may request us not to handle breakpoints */
1447 if (handle_breakpoints)
1448 breakpoint = breakpoint_find(target,
1449 buf_get_u32(armv4_5->pc->value, 0, 32));
1450 if (breakpoint != NULL) {
1451 retval = xscale_unset_breakpoint(target, breakpoint);
1452 if (retval != ERROR_OK)
1453 return retval;
1456 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1458 if (breakpoint)
1460 xscale_set_breakpoint(target, breakpoint);
1463 LOG_DEBUG("target stepped");
1465 return ERROR_OK;
1469 static int xscale_assert_reset(struct target *target)
1471 struct xscale_common *xscale = target_to_xscale(target);
1473 LOG_DEBUG("target->state: %s",
1474 target_state_name(target));
1476 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1477 * end up in T-L-R, which would reset JTAG
1479 jtag_set_end_state(TAP_IDLE);
1480 xscale_jtag_set_instr(target->tap,
1481 XSCALE_SELDCSR << xscale->xscale_variant);
1483 /* set Hold reset, Halt mode and Trap Reset */
1484 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1485 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1486 xscale_write_dcsr(target, 1, 0);
1488 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1489 xscale_jtag_set_instr(target->tap, ~0);
1490 jtag_execute_queue();
1492 /* assert reset */
1493 jtag_add_reset(0, 1);
1495 /* sleep 1ms, to be sure we fulfill any requirements */
1496 jtag_add_sleep(1000);
1497 jtag_execute_queue();
1499 target->state = TARGET_RESET;
1501 if (target->reset_halt)
1503 int retval;
1504 if ((retval = target_halt(target)) != ERROR_OK)
1505 return retval;
1508 return ERROR_OK;
1511 static int xscale_deassert_reset(struct target *target)
1513 struct xscale_common *xscale = target_to_xscale(target);
1514 struct breakpoint *breakpoint = target->breakpoints;
1516 LOG_DEBUG("-");
1518 xscale->ibcr_available = 2;
1519 xscale->ibcr0_used = 0;
1520 xscale->ibcr1_used = 0;
1522 xscale->dbr_available = 2;
1523 xscale->dbr0_used = 0;
1524 xscale->dbr1_used = 0;
1526 /* mark all hardware breakpoints as unset */
1527 while (breakpoint)
1529 if (breakpoint->type == BKPT_HARD)
1531 breakpoint->set = 0;
1533 breakpoint = breakpoint->next;
1536 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1538 /* FIXME: mark hardware watchpoints as unset too. Also,
1539 * at least some of the XScale registers are invalid...
1543 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1544 * contents got invalidated. Safer to force that, so writing new
1545 * contents can't ever fail..
1548 uint32_t address;
1549 unsigned buf_cnt;
1550 const uint8_t *buffer = xscale_debug_handler;
1551 int retval;
1553 /* release SRST */
1554 jtag_add_reset(0, 0);
1556 /* wait 300ms; 150 and 100ms were not enough */
1557 jtag_add_sleep(300*1000);
1559 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1560 jtag_execute_queue();
1562 /* set Hold reset, Halt mode and Trap Reset */
1563 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1564 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1565 xscale_write_dcsr(target, 1, 0);
1567 /* Load the debug handler into the mini-icache. Since
1568 * it's using halt mode (not monitor mode), it runs in
1569 * "Special Debug State" for access to registers, memory,
1570 * coprocessors, trace data, etc.
1572 address = xscale->handler_address;
1573 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1574 binary_size > 0;
1575 binary_size -= buf_cnt, buffer += buf_cnt)
1577 uint32_t cache_line[8];
1578 unsigned i;
1580 buf_cnt = binary_size;
1581 if (buf_cnt > 32)
1582 buf_cnt = 32;
1584 for (i = 0; i < buf_cnt; i += 4)
1586 /* convert LE buffer to host-endian uint32_t */
1587 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
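/* pad a partial last line with 0xe1a08008 ("mov r8, r8", effectively a NOP) */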
1590 for (; i < 32; i += 4)
1592 cache_line[i / 4] = 0xe1a08008;
1595 /* only load addresses other than the reset vectors */
1596 if ((address % 0x400) != 0x0)
1598 retval = xscale_load_ic(target, address,
1599 cache_line);
1600 if (retval != ERROR_OK)
1601 return retval;
1604 address += buf_cnt;
1607 retval = xscale_load_ic(target, 0x0,
1608 xscale->low_vectors);
1609 if (retval != ERROR_OK)
1610 return retval;
1611 retval = xscale_load_ic(target, 0xffff0000,
1612 xscale->high_vectors);
1613 if (retval != ERROR_OK)
1614 return retval;
1616 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1618 jtag_add_sleep(100000);
1620 /* set Hold reset, Halt mode and Trap Reset */
1621 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1622 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1623 xscale_write_dcsr(target, 1, 0);
1625 /* clear Hold reset to let the target run (should enter debug handler) */
1626 xscale_write_dcsr(target, 0, 1);
1627 target->state = TARGET_RUNNING;
1629 if (!target->reset_halt)
1631 jtag_add_sleep(10000);
1633 /* we should have entered debug now */
1634 xscale_debug_entry(target);
1635 target->state = TARGET_HALTED;
1637 /* resume the target */
1638 xscale_resume(target, 1, 0x0, 1, 0);
1642 return ERROR_OK;
1645 static int xscale_read_core_reg(struct target *target, struct reg *r,
1646 int num, enum arm_mode mode)
1648 /** \todo add debug handler support for core register reads */
1649 LOG_ERROR("not implemented");
1650 return ERROR_OK;
1653 static int xscale_write_core_reg(struct target *target, struct reg *r,
1654 int num, enum arm_mode mode, uint32_t value)
1656 /** \todo add debug handler support for core register writes */
1657 LOG_ERROR("not implemented");
1658 return ERROR_OK;
1661 static int xscale_full_context(struct target *target)
1663 struct arm *armv4_5 = target_to_arm(target);
1665 uint32_t *buffer;
1667 int i, j;
1669 LOG_DEBUG("-");
1671 if (target->state != TARGET_HALTED)
1673 LOG_WARNING("target not halted");
1674 return ERROR_TARGET_NOT_HALTED;
1677 buffer = malloc(4 * 8);
1679 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1680 * we can't enter User mode on an XScale (unpredictable),
1681 * but User shares registers with SYS
1683 for (i = 1; i < 7; i++)
1685 enum arm_mode mode = armv4_5_number_to_mode(i);
1686 bool valid = true;
1687 struct reg *r;
1689 if (mode == ARM_MODE_USR)
1690 continue;
1692 /* check if there are invalid registers in the current mode
1694 for (j = 0; valid && j <= 16; j++)
1696 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1697 mode, j).valid)
1698 valid = false;
1700 if (valid)
1701 continue;
1703 /* request banked registers */
1704 xscale_send_u32(target, 0x0);
1706 /* send CPSR for desired bank mode */
1707 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1709 /* get banked registers: r8 to r14; and SPSR
1710 * except in USR/SYS mode
1712 if (mode != ARM_MODE_SYS) {
1713 /* SPSR */
1714 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1715 mode, 16);
1717 xscale_receive(target, buffer, 8);
1719 buf_set_u32(r->value, 0, 32, buffer[7]);
1720 r->dirty = false;
1721 r->valid = true;
1722 } else {
1723 xscale_receive(target, buffer, 7);
1726 /* move data from buffer to register cache */
1727 for (j = 8; j <= 14; j++)
1729 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1730 mode, j);
1732 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1733 r->dirty = false;
1734 r->valid = true;
1738 free(buffer);
1740 return ERROR_OK;
1743 static int xscale_restore_banked(struct target *target)
1745 struct arm *armv4_5 = target_to_arm(target);
1747 int i, j;
1749 if (target->state != TARGET_HALTED)
1751 LOG_WARNING("target not halted");
1752 return ERROR_TARGET_NOT_HALTED;
1755 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1756 * and check if any banked registers need to be written. Ignore
1757 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1758 * an XScale (unpredictable), but they share all registers.
1760 for (i = 1; i < 7; i++)
1762 enum arm_mode mode = armv4_5_number_to_mode(i);
1763 struct reg *r;
1765 if (mode == ARM_MODE_USR)
1766 continue;
1768 /* check if there are dirty registers in this mode */
1769 for (j = 8; j <= 14; j++)
1771 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1772 mode, j).dirty)
1773 goto dirty;
1776 /* if not USR/SYS, check if the SPSR needs to be written */
1777 if (mode != ARM_MODE_SYS)
1779 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1780 mode, 16).dirty)
1781 goto dirty;
1784 /* there's nothing to flush for this mode */
1785 continue;
1787 dirty:
1788 /* command 0x1: "send banked registers" */
1789 xscale_send_u32(target, 0x1);
1791 /* send CPSR for desired mode */
1792 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1794 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1795 * but this protocol doesn't understand that nuance.
1797 for (j = 8; j <= 14; j++) {
1798 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1799 mode, j);
1800 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1801 r->dirty = false;
1804 /* send spsr if not in USR/SYS mode */
1805 if (mode != ARM_MODE_SYS) {
1806 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1807 mode, 16);
1808 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1809 r->dirty = false;
1813 return ERROR_OK;
1816 static int xscale_read_memory(struct target *target, uint32_t address,
1817 uint32_t size, uint32_t count, uint8_t *buffer)
1819 struct xscale_common *xscale = target_to_xscale(target);
1820 uint32_t *buf32;
1821 uint32_t i;
1822 int retval;
1824 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1826 if (target->state != TARGET_HALTED)
1828 LOG_WARNING("target not halted");
1829 return ERROR_TARGET_NOT_HALTED;
1832 /* sanitize arguments */
1833 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1834 return ERROR_INVALID_ARGUMENTS;
1836 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1837 return ERROR_TARGET_UNALIGNED_ACCESS;
1839 /* send memory read request (command 0x1n, n: access size) */
1840 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1841 return retval;
1843 /* send base address for read request */
1844 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1845 return retval;
1847 /* send number of requested data words */
1848 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1849 return retval;
1851 /* receive data from target (count times 32-bit words in host endianness) */
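/* (the handler returns one 32-bit word per element regardless of access
 * size; 2- and 1-byte reads are truncated when unpacking below) */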
1852 buf32 = malloc(4 * count);
1853 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1854 return retval;
1856 /* extract data from host-endian buffer into byte stream */
1857 for (i = 0; i < count; i++)
1859 switch (size)
1861 case 4:
1862 target_buffer_set_u32(target, buffer, buf32[i]);
1863 buffer += 4;
1864 break;
1865 case 2:
1866 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1867 buffer += 2;
1868 break;
1869 case 1:
1870 *buffer++ = buf32[i] & 0xff;
1871 break;
1872 default:
1873 LOG_ERROR("invalid read size");
1874 return ERROR_INVALID_ARGUMENTS;
1878 free(buf32);
1880 /* examine DCSR, to see if Sticky Abort (SA) got set */
1881 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1882 return retval;
1883 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1885 /* clear SA bit */
1886 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1887 return retval;
1889 return ERROR_TARGET_DATA_ABORT;
1892 return ERROR_OK;
1895 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1896 uint32_t size, uint32_t count, uint8_t *buffer)
1898 struct xscale_common *xscale = target_to_xscale(target);
1900 /* with MMU inactive, there are only physical addresses */
1901 if (!xscale->armv4_5_mmu.mmu_enabled)
1902 return xscale_read_memory(target, address, size, count, buffer);
1904 /** \todo: provide a non-stub implementation of this routine. */
1905 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1906 target_name(target), __func__);
1907 return ERROR_FAIL;
1910 static int xscale_write_memory(struct target *target, uint32_t address,
1911 uint32_t size, uint32_t count, uint8_t *buffer)
1913 struct xscale_common *xscale = target_to_xscale(target);
1914 int retval;
1916 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1918 if (target->state != TARGET_HALTED)
1920 LOG_WARNING("target not halted");
1921 return ERROR_TARGET_NOT_HALTED;
1924 /* sanitize arguments */
1925 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1926 return ERROR_INVALID_ARGUMENTS;
1928 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1929 return ERROR_TARGET_UNALIGNED_ACCESS;
1931 /* send memory write request (command 0x2n, n: access size) */
1932 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1933 return retval;
1935 /* send base address for write request */
1936 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1937 return retval;
1939 /* send number of requested data words to be written */
1940 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1941 return retval;
1943 /* extract data from host-endian buffer into byte stream */
1944 #if 0
1945 for (i = 0; i < count; i++)
1947 switch (size)
1949 case 4:
1950 value = target_buffer_get_u32(target, buffer);
1951 xscale_send_u32(target, value);
1952 buffer += 4;
1953 break;
1954 case 2:
1955 value = target_buffer_get_u16(target, buffer);
1956 xscale_send_u32(target, value);
1957 buffer += 2;
1958 break;
1959 case 1:
1960 value = *buffer;
1961 xscale_send_u32(target, value);
1962 buffer += 1;
1963 break;
1964 default:
1965 LOG_ERROR("should never get here");
1966 exit(-1);
1969 #endif
1970 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1971 return retval;
1973 /* examine DCSR, to see if Sticky Abort (SA) got set */
1974 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1975 return retval;
1976 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1978 /* clear SA bit */
1979 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1980 return retval;
1982 return ERROR_TARGET_DATA_ABORT;
1985 return ERROR_OK;
1988 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1989 uint32_t size, uint32_t count, uint8_t *buffer)
1991 struct xscale_common *xscale = target_to_xscale(target);
1993 /* with MMU inactive, there are only physical addresses */
1994 if (!xscale->armv4_5_mmu.mmu_enabled)
1995 return xscale_write_memory(target, address, size, count, buffer);
1997 /** \todo: provide a non-stub implementation of this routine. */
1998 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1999 target_name(target), __func__);
2000 return ERROR_FAIL;
2003 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2004 uint32_t count, uint8_t *buffer)
2006 return xscale_write_memory(target, address, 4, count, buffer);
2009 static uint32_t xscale_get_ttb(struct target *target)
2011 struct xscale_common *xscale = target_to_xscale(target);
2012 uint32_t ttb;
2014 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2015 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2017 return ttb;
2020 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2021 int d_u_cache, int i_cache)
2023 struct xscale_common *xscale = target_to_xscale(target);
2024 uint32_t cp15_control;
2026 /* read cp15 control register */
2027 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2028 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2030 if (mmu)
2031 cp15_control &= ~0x1U;
2033 if (d_u_cache)
2035 /* clean DCache */
2036 xscale_send_u32(target, 0x50);
2037 xscale_send_u32(target, xscale->cache_clean_address);
2039 /* invalidate DCache */
2040 xscale_send_u32(target, 0x51);
2042 cp15_control &= ~0x4U;
2045 if (i_cache)
2047 /* invalidate ICache */
2048 xscale_send_u32(target, 0x52);
2049 cp15_control &= ~0x1000U;
2052 /* write new cp15 control register */
2053 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2055 /* execute cpwait to ensure outstanding operations complete */
2056 xscale_send_u32(target, 0x53);
2059 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2060 int d_u_cache, int i_cache)
2062 struct xscale_common *xscale = target_to_xscale(target);
2063 uint32_t cp15_control;
2065 /* read cp15 control register */
2066 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2067 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2069 if (mmu)
2070 cp15_control |= 0x1U;
2072 if (d_u_cache)
2073 cp15_control |= 0x4U;
2075 if (i_cache)
2076 cp15_control |= 0x1000U;
2078 /* write new cp15 control register */
2079 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2081 /* execute cpwait to ensure outstanding operations complete */
2082 xscale_send_u32(target, 0x53);
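/* Hardware breakpoints use the two instruction breakpoint registers
 * (IBCR0/IBCR1); bit 0 of the value written there acts as the enable bit.
 * Software breakpoints replace the instruction with an ARM or Thumb BKPT
 * and keep the original opcode in breakpoint->orig_instr. */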
2085 static int xscale_set_breakpoint(struct target *target,
2086 struct breakpoint *breakpoint)
2088 int retval;
2089 struct xscale_common *xscale = target_to_xscale(target);
2091 if (target->state != TARGET_HALTED)
2093 LOG_WARNING("target not halted");
2094 return ERROR_TARGET_NOT_HALTED;
2097 if (breakpoint->set)
2099 LOG_WARNING("breakpoint already set");
2100 return ERROR_OK;
2103 if (breakpoint->type == BKPT_HARD)
2105 uint32_t value = breakpoint->address | 1;
2106 if (!xscale->ibcr0_used)
2108 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2109 xscale->ibcr0_used = 1;
2110 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2112 else if (!xscale->ibcr1_used)
2114 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2115 xscale->ibcr1_used = 1;
2116 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2118 else
2120 LOG_ERROR("BUG: no hardware comparator available");
2121 return ERROR_OK;
2124 else if (breakpoint->type == BKPT_SOFT)
2126 if (breakpoint->length == 4)
2128 /* keep the original instruction in target endianness */
2129 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2131 return retval;
2133 /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2134 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2136 return retval;
2139 else
2141 /* keep the original instruction in target endianness */
2142 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2144 return retval;
2146 /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2147 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2149 return retval;
2152 breakpoint->set = 1;
2155 return ERROR_OK;
2158 static int xscale_add_breakpoint(struct target *target,
2159 struct breakpoint *breakpoint)
2161 struct xscale_common *xscale = target_to_xscale(target);
2163 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2165 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2166 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2169 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2171 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes in length are supported");
2172 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2175 if (breakpoint->type == BKPT_HARD)
2177 xscale->ibcr_available--;
2180 return ERROR_OK;
2183 static int xscale_unset_breakpoint(struct target *target,
2184 struct breakpoint *breakpoint)
2186 int retval;
2187 struct xscale_common *xscale = target_to_xscale(target);
2189 if (target->state != TARGET_HALTED)
2191 LOG_WARNING("target not halted");
2192 return ERROR_TARGET_NOT_HALTED;
2195 if (!breakpoint->set)
2197 LOG_WARNING("breakpoint not set");
2198 return ERROR_OK;
2201 if (breakpoint->type == BKPT_HARD)
2203 if (breakpoint->set == 1)
2205 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2206 xscale->ibcr0_used = 0;
2208 else if (breakpoint->set == 2)
2210 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2211 xscale->ibcr1_used = 0;
2213 breakpoint->set = 0;
2215 else
2217 /* restore original instruction (kept in target endianness) */
2218 if (breakpoint->length == 4)
2220 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2222 return retval;
2225 else
2227 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2229 return retval;
2232 breakpoint->set = 0;
2235 return ERROR_OK;
2238 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2240 struct xscale_common *xscale = target_to_xscale(target);
2242 if (target->state != TARGET_HALTED)
2244 LOG_WARNING("target not halted");
2245 return ERROR_TARGET_NOT_HALTED;
2248 if (breakpoint->set)
2250 xscale_unset_breakpoint(target, breakpoint);
2253 if (breakpoint->type == BKPT_HARD)
2254 xscale->ibcr_available++;
2256 return ERROR_OK;
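/* Watchpoints use the two data breakpoint address registers (DBR0/DBR1).
 * DBCON holds a two-bit mode field per register (bits 1:0 for DBR0,
 * bits 3:2 for DBR1); the code below encodes write-only as 0x1, any access
 * as 0x2 and read-only as 0x3. */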
2259 static int xscale_set_watchpoint(struct target *target,
2260 struct watchpoint *watchpoint)
2262 struct xscale_common *xscale = target_to_xscale(target);
2263 uint8_t enable = 0;
2264 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2265 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2267 if (target->state != TARGET_HALTED)
2269 LOG_WARNING("target not halted");
2270 return ERROR_TARGET_NOT_HALTED;
2273 xscale_get_reg(dbcon);
2275 switch (watchpoint->rw)
2277 case WPT_READ:
2278 enable = 0x3;
2279 break;
2280 case WPT_ACCESS:
2281 enable = 0x2;
2282 break;
2283 case WPT_WRITE:
2284 enable = 0x1;
2285 break;
2286 default:
2287 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2290 if (!xscale->dbr0_used)
2292 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2293 dbcon_value |= enable;
2294 xscale_set_reg_u32(dbcon, dbcon_value);
2295 watchpoint->set = 1;
2296 xscale->dbr0_used = 1;
2298 else if (!xscale->dbr1_used)
2300 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2301 dbcon_value |= enable << 2;
2302 xscale_set_reg_u32(dbcon, dbcon_value);
2303 watchpoint->set = 2;
2304 xscale->dbr1_used = 1;
2306 else
2308 LOG_ERROR("BUG: no hardware comparator available");
2309 return ERROR_OK;
2312 return ERROR_OK;
2315 static int xscale_add_watchpoint(struct target *target,
2316 struct watchpoint *watchpoint)
2318 struct xscale_common *xscale = target_to_xscale(target);
2320 if (xscale->dbr_available < 1)
2322 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2325 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2327 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2330 xscale->dbr_available--;
2332 return ERROR_OK;
2335 static int xscale_unset_watchpoint(struct target *target,
2336 struct watchpoint *watchpoint)
2338 struct xscale_common *xscale = target_to_xscale(target);
2339 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2340 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2342 if (target->state != TARGET_HALTED)
2344 LOG_WARNING("target not halted");
2345 return ERROR_TARGET_NOT_HALTED;
2348 if (!watchpoint->set)
2350 LOG_WARNING("watchpoint not set");
2351 return ERROR_OK;
2354 if (watchpoint->set == 1)
2356 dbcon_value &= ~0x3;
2357 xscale_set_reg_u32(dbcon, dbcon_value);
2358 xscale->dbr0_used = 0;
2360 else if (watchpoint->set == 2)
2362 dbcon_value &= ~0xc;
2363 xscale_set_reg_u32(dbcon, dbcon_value);
2364 xscale->dbr1_used = 0;
2366 watchpoint->set = 0;
2368 return ERROR_OK;
2371 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2373 struct xscale_common *xscale = target_to_xscale(target);
2375 if (target->state != TARGET_HALTED)
2377 LOG_WARNING("target not halted");
2378 return ERROR_TARGET_NOT_HALTED;
2381 if (watchpoint->set)
2383 xscale_unset_watchpoint(target, watchpoint);
2386 xscale->dbr_available++;
2388 return ERROR_OK;
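/* Register access: DCSR, TX and RX have JTAG scan chains of their own and
 * are handled directly; every other debug register is transferred by the
 * debug handler using its CP read (0x40) and CP write (0x41) commands. */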
2391 static int xscale_get_reg(struct reg *reg)
2393 struct xscale_reg *arch_info = reg->arch_info;
2394 struct target *target = arch_info->target;
2395 struct xscale_common *xscale = target_to_xscale(target);
2397 /* DCSR, TX and RX are accessible via JTAG */
2398 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2400 return xscale_read_dcsr(arch_info->target);
2402 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2404 /* 1 = consume register content */
2405 return xscale_read_tx(arch_info->target, 1);
2407 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2409 /* can't read from RX register (host -> debug handler) */
2410 return ERROR_OK;
2412 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2414 /* can't (explicitly) read from TXRXCTRL register */
2415 return ERROR_OK;
2417 else /* Other DBG registers have to be transferred by the debug handler */
2419 /* send CP read request (command 0x40) */
2420 xscale_send_u32(target, 0x40);
2422 /* send CP register number */
2423 xscale_send_u32(target, arch_info->dbg_handler_number);
2425 /* read register value */
2426 xscale_read_tx(target, 1);
2427 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2429 reg->dirty = 0;
2430 reg->valid = 1;
2433 return ERROR_OK;
2436 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2438 struct xscale_reg *arch_info = reg->arch_info;
2439 struct target *target = arch_info->target;
2440 struct xscale_common *xscale = target_to_xscale(target);
2441 uint32_t value = buf_get_u32(buf, 0, 32);
2443 /* DCSR, TX and RX are accessible via JTAG */
2444 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2446 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2447 return xscale_write_dcsr(arch_info->target, -1, -1);
2449 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2451 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2452 return xscale_write_rx(arch_info->target);
2454 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2456 /* can't write to TX register (debug-handler -> host) */
2457 return ERROR_OK;
2459 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2461 /* can't (explicitly) write to TXRXCTRL register */
2462 return ERROR_OK;
2464 else /* Other DBG registers have to be transferred by the debug handler */
2466 /* send CP write request (command 0x41) */
2467 xscale_send_u32(target, 0x41);
2469 /* send CP register number */
2470 xscale_send_u32(target, arch_info->dbg_handler_number);
2472 /* send CP register value */
2473 xscale_send_u32(target, value);
2474 buf_set_u32(reg->value, 0, 32, value);
2477 return ERROR_OK;
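/* Write DCSR through the running debug handler (CP write command) rather
 * than via the external JTAG DCSR scan chain; the "_sw" suffix presumably
 * refers to this software route. */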
2480 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2482 struct xscale_common *xscale = target_to_xscale(target);
2483 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2484 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2486 /* send CP write request (command 0x41) */
2487 xscale_send_u32(target, 0x41);
2489 /* send CP register number */
2490 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2492 /* send CP register value */
2493 xscale_send_u32(target, value);
2494 buf_set_u32(dcsr->value, 0, 32, value);
2496 return ERROR_OK;
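/* Trace buffer handling.  The debug handler returns 258 words: 256 one-byte
 * trace entries followed by the two checkpoint registers.  The four entries
 * immediately before each indirect branch message (0x9n/0xDn) hold the
 * branch target address; the backwards scan below marks them so they are
 * not decoded as messages. */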
2499 static int xscale_read_trace(struct target *target)
2501 struct xscale_common *xscale = target_to_xscale(target);
2502 struct arm *armv4_5 = &xscale->armv4_5_common;
2503 struct xscale_trace_data **trace_data_p;
2505 /* 258 words from debug handler
2506 * 256 trace buffer entries
2507 * 2 checkpoint addresses
2509 uint32_t trace_buffer[258];
2510 int is_address[256];
2511 int i, j;
2513 if (target->state != TARGET_HALTED)
2515 LOG_WARNING("target must be stopped to read trace data");
2516 return ERROR_TARGET_NOT_HALTED;
2519 /* send read trace buffer command (command 0x61) */
2520 xscale_send_u32(target, 0x61);
2522 /* receive trace buffer content */
2523 xscale_receive(target, trace_buffer, 258);
2525 /* parse buffer backwards to identify address entries */
2526 for (i = 255; i >= 0; i--)
2528 is_address[i] = 0;
2529 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2530 ((trace_buffer[i] & 0xf0) == 0xd0))
2532 if (i > 0)
2533 is_address[--i] = 1;
2534 if (i > 0)
2535 is_address[--i] = 1;
2536 if (i > 0)
2537 is_address[--i] = 1;
2538 if (i > 0)
2539 is_address[--i] = 1;
2544 /* search first non-zero entry */
2545 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2548 if (j == 256)
2550 LOG_DEBUG("no trace data collected");
2551 return ERROR_XSCALE_NO_TRACE_DATA;
2554 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2557 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2558 (*trace_data_p)->next = NULL;
2559 (*trace_data_p)->chkpt0 = trace_buffer[256];
2560 (*trace_data_p)->chkpt1 = trace_buffer[257];
2561 (*trace_data_p)->last_instruction =
2562 buf_get_u32(armv4_5->pc->value, 0, 32);
2563 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2564 (*trace_data_p)->depth = 256 - j;
2566 for (i = j; i < 256; i++)
2568 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2569 if (is_address[i])
2570 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2571 else
2572 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2575 return ERROR_OK;
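/* Instruction reconstruction for trace analysis: opcodes are read from the
 * image loaded with "xscale trace_image", not from target memory, so an
 * image covering the traced code must be loaded first. */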
2578 static int xscale_read_instruction(struct target *target,
2579 struct arm_instruction *instruction)
2581 struct xscale_common *xscale = target_to_xscale(target);
2582 int i;
2583 int section = -1;
2584 size_t size_read;
2585 uint32_t opcode;
2586 int retval;
2588 if (!xscale->trace.image)
2589 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2591 /* search for the section the current instruction belongs to */
2592 for (i = 0; i < xscale->trace.image->num_sections; i++)
2594 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2595 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2597 section = i;
2598 break;
2602 if (section == -1)
2604 /* current instruction couldn't be found in the image */
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2608 if (xscale->trace.core_state == ARM_STATE_ARM)
2610 uint8_t buf[4];
2611 if ((retval = image_read_section(xscale->trace.image, section,
2612 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2613 4, buf, &size_read)) != ERROR_OK)
2615 LOG_ERROR("error while reading instruction: %i", retval);
2616 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2618 opcode = target_buffer_get_u32(target, buf);
2619 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2621 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2623 uint8_t buf[2];
2624 if ((retval = image_read_section(xscale->trace.image, section,
2625 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2626 2, buf, &size_read)) != ERROR_OK)
2628 LOG_ERROR("error while reading instruction: %i", retval);
2629 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2631 opcode = target_buffer_get_u16(target, buf);
2632 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2634 else
2636 LOG_ERROR("BUG: unknown core state encountered");
2637 exit(-1);
2640 return ERROR_OK;
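/* Reassemble a 32-bit branch target from the four trace entries preceding
 * an indirect branch message; the entry closest to the message supplies the
 * least significant byte. */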
2643 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2644 int i, uint32_t *target)
2646 /* if there are fewer than four entries prior to the indirect branch message
2647 * we can't extract the address */
2648 if (i < 4)
2650 return -1;
2653 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2654 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2656 return 0;
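/* Walk the collected trace data.  The upper nibble of each message selects
 * the type (0-7 exception, 8 direct branch, 9 indirect branch, 12/13
 * checkpointed branches, 15 roll-over) and the lower nibble counts the
 * instructions executed since the previous message. */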
2659 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2661 struct xscale_common *xscale = target_to_xscale(target);
2662 int next_pc_ok = 0;
2663 uint32_t next_pc = 0x0;
2664 struct xscale_trace_data *trace_data = xscale->trace.data;
2665 int retval;
2667 while (trace_data)
2669 int i, chkpt;
2670 int rollover;
2671 int branch;
2672 int exception;
2673 xscale->trace.core_state = ARM_STATE_ARM;
2675 chkpt = 0;
2676 rollover = 0;
2678 for (i = 0; i < trace_data->depth; i++)
2680 next_pc_ok = 0;
2681 branch = 0;
2682 exception = 0;
2684 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2685 continue;
2687 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2689 case 0: /* Exceptions */
2690 case 1:
2691 case 2:
2692 case 3:
2693 case 4:
2694 case 5:
2695 case 6:
2696 case 7:
2697 exception = (trace_data->entries[i].data & 0x70) >> 4;
2698 next_pc_ok = 1;
2699 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2700 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2701 break;
2702 case 8: /* Direct Branch */
2703 branch = 1;
2704 break;
2705 case 9: /* Indirect Branch */
2706 branch = 1;
2707 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2709 next_pc_ok = 1;
2711 break;
2712 case 13: /* Checkpointed Indirect Branch */
2713 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2715 next_pc_ok = 1;
2716 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2717 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2718 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2720 /* explicit fall-through */
2721 case 12: /* Checkpointed Direct Branch */
2722 branch = 1;
2723 if (chkpt == 0)
2725 next_pc_ok = 1;
2726 next_pc = trace_data->chkpt0;
2727 chkpt++;
2729 else if (chkpt == 1)
2731 next_pc_ok = 1;
2732 next_pc = trace_data->chkpt1;
2733 chkpt++;
2735 else
2737 LOG_WARNING("more than two checkpointed branches encountered");
2739 break;
2740 case 15: /* Roll-over */
2741 rollover++;
2742 continue;
2743 default: /* Reserved */
2744 command_print(cmd_ctx, "--- reserved trace message ---");
2745 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2746 return ERROR_OK;
2749 if (xscale->trace.pc_ok)
2751 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2752 struct arm_instruction instruction;
2754 if ((exception == 6) || (exception == 7))
2756 /* IRQ or FIQ exception, no instruction executed */
2757 executed -= 1;
2760 while (executed-- >= 0)
2762 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2764 /* can't continue tracing with no image available */
2765 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2767 return retval;
2769 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2771 /* TODO: handle incomplete images */
2775 /* a precise abort on a load to the PC is included in the incremental
2776 * word count, other instructions causing data aborts are not included
2778 if ((executed == 0) && (exception == 4)
2779 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2781 if ((instruction.type == ARM_LDM)
2782 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2784 executed--;
2786 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2787 && (instruction.info.load_store.Rd != 15))
2789 executed--;
2793 /* only the last instruction executed
2794 * (the one that caused the control flow change)
2795 * could be a taken branch
2797 if (((executed == -1) && (branch == 1)) &&
2798 (((instruction.type == ARM_B) ||
2799 (instruction.type == ARM_BL) ||
2800 (instruction.type == ARM_BLX)) &&
2801 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2803 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2805 else
2807 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2809 command_print(cmd_ctx, "%s", instruction.text);
2812 rollover = 0;
2815 if (next_pc_ok)
2817 xscale->trace.current_pc = next_pc;
2818 xscale->trace.pc_ok = 1;
2822 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2824 struct arm_instruction instruction;
2825 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2827 /* can't continue tracing with no image available */
2828 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2830 return retval;
2832 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2834 /* TODO: handle incomplete images */
2837 command_print(cmd_ctx, "%s", instruction.text);
2840 trace_data = trace_data->next;
2843 return ERROR_OK;
2846 static const struct reg_arch_type xscale_reg_type = {
2847 .get = xscale_get_reg,
2848 .set = xscale_set_reg,
2851 static void xscale_build_reg_cache(struct target *target)
2853 struct xscale_common *xscale = target_to_xscale(target);
2854 struct arm *armv4_5 = &xscale->armv4_5_common;
2855 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2856 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2857 int i;
2858 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2860 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2862 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2863 cache_p = &(*cache_p)->next;
2865 /* fill in values for the xscale reg cache */
2866 (*cache_p)->name = "XScale registers";
2867 (*cache_p)->next = NULL;
2868 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2869 (*cache_p)->num_regs = num_regs;
2871 for (i = 0; i < num_regs; i++)
2873 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2874 (*cache_p)->reg_list[i].value = calloc(4, 1);
2875 (*cache_p)->reg_list[i].dirty = 0;
2876 (*cache_p)->reg_list[i].valid = 0;
2877 (*cache_p)->reg_list[i].size = 32;
2878 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2879 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2880 arch_info[i] = xscale_reg_arch_info[i];
2881 arch_info[i].target = target;
2884 xscale->reg_cache = (*cache_p);
2887 static int xscale_init_target(struct command_context *cmd_ctx,
2888 struct target *target)
2890 xscale_build_reg_cache(target);
2891 return ERROR_OK;
2894 static int xscale_init_arch_info(struct target *target,
2895 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2897 struct arm *armv4_5;
2898 uint32_t high_reset_branch, low_reset_branch;
2899 int i;
2901 armv4_5 = &xscale->armv4_5_common;
2903 /* store architecture specific data */
2904 xscale->common_magic = XSCALE_COMMON_MAGIC;
2906 /* we don't really *need* a variant param ... */
2907 if (variant) {
2908 int ir_length = 0;
2910 if (strcmp(variant, "pxa250") == 0
2911 || strcmp(variant, "pxa255") == 0
2912 || strcmp(variant, "pxa26x") == 0)
2913 ir_length = 5;
2914 else if (strcmp(variant, "pxa27x") == 0
2915 || strcmp(variant, "ixp42x") == 0
2916 || strcmp(variant, "ixp45x") == 0
2917 || strcmp(variant, "ixp46x") == 0)
2918 ir_length = 7;
2919 else if (strcmp(variant, "pxa3xx") == 0)
2920 ir_length = 11;
2921 else
2922 LOG_WARNING("%s: unrecognized variant %s",
2923 tap->dotted_name, variant);
2925 if (ir_length && ir_length != tap->ir_length) {
2926 LOG_WARNING("%s: IR length for %s is %d; fixing",
2927 tap->dotted_name, variant, ir_length);
2928 tap->ir_length = ir_length;
2932 /* PXA3xx shifts the JTAG instructions */
2933 if (tap->ir_length == 11)
2934 xscale->xscale_variant = XSCALE_PXA3XX;
2935 else
2936 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2938 /* the debug handler isn't installed (and thus not running) at this time */
2939 xscale->handler_address = 0xfe000800;
2941 /* clear the vectors we keep locally for reference */
2942 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2943 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2945 /* no user-specified vectors have been configured yet */
2946 xscale->static_low_vectors_set = 0x0;
2947 xscale->static_high_vectors_set = 0x0;
2949 /* calculate branches to debug handler */
2950 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2951 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
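/* ARM B-instruction offsets: (target - vector address - 8) >> 2, where the 8
 * accounts for the pipeline; 0x20 is assumed to be the offset of the debug
 * handler's entry point within the handler image. */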
2953 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2954 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2956 for (i = 1; i <= 7; i++)
2958 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2959 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2962 /* 64kB aligned region used for DCache cleaning */
2963 xscale->cache_clean_address = 0xfffe0000;
2965 xscale->hold_rst = 0;
2966 xscale->external_debug_break = 0;
2968 xscale->ibcr_available = 2;
2969 xscale->ibcr0_used = 0;
2970 xscale->ibcr1_used = 0;
2972 xscale->dbr_available = 2;
2973 xscale->dbr0_used = 0;
2974 xscale->dbr1_used = 0;
2976 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2977 target_name(target));
2979 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2980 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2982 xscale->vector_catch = 0x1;
2984 xscale->trace.capture_status = TRACE_IDLE;
2985 xscale->trace.data = NULL;
2986 xscale->trace.image = NULL;
2987 xscale->trace.buffer_enabled = 0;
2988 xscale->trace.buffer_fill = 0;
2990 /* prepare ARMv4/5 specific information */
2991 armv4_5->arch_info = xscale;
2992 armv4_5->read_core_reg = xscale_read_core_reg;
2993 armv4_5->write_core_reg = xscale_write_core_reg;
2994 armv4_5->full_context = xscale_full_context;
2996 arm_init_arch_info(target, armv4_5);
2998 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2999 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3000 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3001 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3002 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3003 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3004 xscale->armv4_5_mmu.has_tiny_pages = 1;
3005 xscale->armv4_5_mmu.mmu_enabled = 0;
3007 return ERROR_OK;
3010 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3012 struct xscale_common *xscale;
3014 if (sizeof xscale_debug_handler - 1 > 0x800) {
3015 LOG_ERROR("debug_handler.bin: larger than 2kb");
3016 return ERROR_FAIL;
3019 xscale = calloc(1, sizeof(*xscale));
3020 if (!xscale)
3021 return ERROR_FAIL;
3023 return xscale_init_arch_info(target, xscale, target->tap,
3024 target->variant);
3027 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3029 struct target *target = NULL;
3030 struct xscale_common *xscale;
3031 int retval;
3032 uint32_t handler_address;
3034 if (CMD_ARGC < 2)
3036 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3037 return ERROR_OK;
3040 if ((target = get_target(CMD_ARGV[0])) == NULL)
3042 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3043 return ERROR_FAIL;
3046 xscale = target_to_xscale(target);
3047 retval = xscale_verify_pointer(CMD_CTX, xscale);
3048 if (retval != ERROR_OK)
3049 return retval;
3051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3053 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3054 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3056 xscale->handler_address = handler_address;
3058 else
3060 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3061 return ERROR_FAIL;
3064 return ERROR_OK;
3067 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3069 struct target *target = NULL;
3070 struct xscale_common *xscale;
3071 int retval;
3072 uint32_t cache_clean_address;
3074 if (CMD_ARGC < 2)
3076 return ERROR_COMMAND_SYNTAX_ERROR;
3079 target = get_target(CMD_ARGV[0]);
3080 if (target == NULL)
3082 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3083 return ERROR_FAIL;
3085 xscale = target_to_xscale(target);
3086 retval = xscale_verify_pointer(CMD_CTX, xscale);
3087 if (retval != ERROR_OK)
3088 return retval;
3090 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3092 if (cache_clean_address & 0xffff)
3094 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3096 else
3098 xscale->cache_clean_address = cache_clean_address;
3101 return ERROR_OK;
3104 COMMAND_HANDLER(xscale_handle_cache_info_command)
3106 struct target *target = get_current_target(CMD_CTX);
3107 struct xscale_common *xscale = target_to_xscale(target);
3108 int retval;
3110 retval = xscale_verify_pointer(CMD_CTX, xscale);
3111 if (retval != ERROR_OK)
3112 return retval;
3114 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
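/* Address translation is delegated to the generic ARMv4/5 table walk,
 * armv4_5_mmu_translate_va(), using the hooks installed in
 * xscale_init_arch_info(). */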
3117 static int xscale_virt2phys(struct target *target,
3118 uint32_t virtual, uint32_t *physical)
3120 struct xscale_common *xscale = target_to_xscale(target);
3121 int type;
3122 uint32_t cb;
3123 int domain;
3124 uint32_t ap;
3126 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3127 LOG_ERROR(xscale_not);
3128 return ERROR_TARGET_INVALID;
3131 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3132 if (type == -1)
3134 return ret;
3136 *physical = ret;
3137 return ERROR_OK;
3140 static int xscale_mmu(struct target *target, int *enabled)
3142 struct xscale_common *xscale = target_to_xscale(target);
3144 if (target->state != TARGET_HALTED)
3146 LOG_ERROR("Target not halted");
3147 return ERROR_TARGET_INVALID;
3149 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3150 return ERROR_OK;
3153 COMMAND_HANDLER(xscale_handle_mmu_command)
3155 struct target *target = get_current_target(CMD_CTX);
3156 struct xscale_common *xscale = target_to_xscale(target);
3157 int retval;
3159 retval = xscale_verify_pointer(CMD_CTX, xscale);
3160 if (retval != ERROR_OK)
3161 return retval;
3163 if (target->state != TARGET_HALTED)
3165 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3166 return ERROR_OK;
3169 if (CMD_ARGC >= 1)
3171 bool enable;
3172 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3173 if (enable)
3174 xscale_enable_mmu_caches(target, 1, 0, 0);
3175 else
3176 xscale_disable_mmu_caches(target, 1, 0, 0);
3177 xscale->armv4_5_mmu.mmu_enabled = enable;
3180 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3182 return ERROR_OK;
3185 COMMAND_HANDLER(xscale_handle_idcache_command)
3187 struct target *target = get_current_target(CMD_CTX);
3188 struct xscale_common *xscale = target_to_xscale(target);
3190 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3191 if (retval != ERROR_OK)
3192 return retval;
3194 if (target->state != TARGET_HALTED)
3196 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3197 return ERROR_OK;
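/* One handler serves both the "icache" and "dcache" commands; CMD_NAME
 * tells them apart. */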
3200 bool icache = false;
3201 if (strcmp(CMD_NAME, "icache") == 0)
3202 icache = true;
3203 if (CMD_ARGC >= 1)
3205 bool enable;
3206 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3207 if (enable)
3208 xscale_enable_mmu_caches(target, 0, !icache, icache);
3209 else
3210 xscale_disable_mmu_caches(target, 0, !icache, icache);
3211 if (icache)
3212 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3213 else
3214 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3217 bool enabled = icache ?
3218 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3219 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3220 const char *msg = enabled ? "enabled" : "disabled";
3221 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3223 return ERROR_OK;
3226 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3228 struct target *target = get_current_target(CMD_CTX);
3229 struct xscale_common *xscale = target_to_xscale(target);
3230 int retval;
3232 retval = xscale_verify_pointer(CMD_CTX, xscale);
3233 if (retval != ERROR_OK)
3234 return retval;
3236 if (CMD_ARGC < 1)
3238 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3240 else
3242 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3243 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3244 xscale_write_dcsr(target, -1, -1);
3247 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3249 return ERROR_OK;
3253 COMMAND_HANDLER(xscale_handle_vector_table_command)
3255 struct target *target = get_current_target(CMD_CTX);
3256 struct xscale_common *xscale = target_to_xscale(target);
3257 int err = 0;
3258 int retval;
3260 retval = xscale_verify_pointer(CMD_CTX, xscale);
3261 if (retval != ERROR_OK)
3262 return retval;
3264 if (CMD_ARGC == 0) /* print current settings */
3266 int idx;
3268 command_print(CMD_CTX, "active user-set static vectors:");
3269 for (idx = 1; idx < 8; idx++)
3270 if (xscale->static_low_vectors_set & (1 << idx))
3271 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3272 for (idx = 1; idx < 8; idx++)
3273 if (xscale->static_high_vectors_set & (1 << idx))
3274 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3275 return ERROR_OK;
3278 if (CMD_ARGC != 3)
3279 err = 1;
3280 else
3282 int idx;
3283 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3284 uint32_t vec;
3285 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3287 if (idx < 1 || idx >= 8)
3288 err = 1;
3290 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3292 xscale->static_low_vectors_set |= (1<<idx);
3293 xscale->static_low_vectors[idx] = vec;
3295 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3297 xscale->static_high_vectors_set |= (1<<idx);
3298 xscale->static_high_vectors[idx] = vec;
3300 else
3301 err = 1;
3304 if (err)
3305 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3307 return ERROR_OK;
3311 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3313 struct target *target = get_current_target(CMD_CTX);
3314 struct xscale_common *xscale = target_to_xscale(target);
3315 struct arm *armv4_5 = &xscale->armv4_5_common;
3316 uint32_t dcsr_value;
3317 int retval;
3319 retval = xscale_verify_pointer(CMD_CTX, xscale);
3320 if (retval != ERROR_OK)
3321 return retval;
3323 if (target->state != TARGET_HALTED)
3325 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3326 return ERROR_OK;
3329 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3331 struct xscale_trace_data *td, *next_td;
3332 xscale->trace.buffer_enabled = 1;
3334 /* free old trace data */
3335 td = xscale->trace.data;
3336 while (td)
3338 next_td = td->next;
3340 if (td->entries)
3341 free(td->entries);
3342 free(td);
3343 td = next_td;
3345 xscale->trace.data = NULL;
3347 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3349 xscale->trace.buffer_enabled = 0;
3352 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3354 uint32_t fill = 1;
3355 if (CMD_ARGC >= 3)
3356 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3357 xscale->trace.buffer_fill = fill;
3359 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3361 xscale->trace.buffer_fill = -1;
3364 if (xscale->trace.buffer_enabled)
3366 /* if we enable the trace buffer in fill-once
3367 * mode we know the address of the first instruction */
3368 xscale->trace.pc_ok = 1;
3369 xscale->trace.current_pc =
3370 buf_get_u32(armv4_5->pc->value, 0, 32);
3372 else
3374 /* otherwise the address is unknown, and we have no known good PC */
3375 xscale->trace.pc_ok = 0;
3378 command_print(CMD_CTX, "trace buffer %s (%s)",
3379 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3380 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
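/* Update the trace bits in DCSR: the low two bits are cleared and, per the
 * usage here, setting bit 1 selects fill-once mode while leaving it clear
 * selects wrap-around mode. */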
3382 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3383 if (xscale->trace.buffer_fill >= 0)
3384 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3385 else
3386 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3388 return ERROR_OK;
3391 COMMAND_HANDLER(xscale_handle_trace_image_command)
3393 struct target *target = get_current_target(CMD_CTX);
3394 struct xscale_common *xscale = target_to_xscale(target);
3395 int retval;
3397 if (CMD_ARGC < 1)
3399 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3400 return ERROR_OK;
3403 retval = xscale_verify_pointer(CMD_CTX, xscale);
3404 if (retval != ERROR_OK)
3405 return retval;
3407 if (xscale->trace.image)
3409 image_close(xscale->trace.image);
3410 free(xscale->trace.image);
3411 command_print(CMD_CTX, "previously loaded image found and closed");
3414 xscale->trace.image = malloc(sizeof(struct image));
3415 xscale->trace.image->base_address_set = 0;
3416 xscale->trace.image->start_address_set = 0;
3418 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3419 if (CMD_ARGC >= 2)
3421 xscale->trace.image->base_address_set = 1;
3422 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3424 else
3426 xscale->trace.image->base_address_set = 0;
3429 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3431 free(xscale->trace.image);
3432 xscale->trace.image = NULL;
3433 return ERROR_OK;
3436 return ERROR_OK;
3439 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3441 struct target *target = get_current_target(CMD_CTX);
3442 struct xscale_common *xscale = target_to_xscale(target);
3443 struct xscale_trace_data *trace_data;
3444 struct fileio file;
3445 int retval;
3447 retval = xscale_verify_pointer(CMD_CTX, xscale);
3448 if (retval != ERROR_OK)
3449 return retval;
3451 if (target->state != TARGET_HALTED)
3453 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3454 return ERROR_OK;
3457 if (CMD_ARGC < 1)
3459 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3460 return ERROR_OK;
3463 trace_data = xscale->trace.data;
3465 if (!trace_data)
3467 command_print(CMD_CTX, "no trace data collected");
3468 return ERROR_OK;
3471 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3473 return ERROR_OK;
3476 while (trace_data)
3478 int i;
3480 fileio_write_u32(&file, trace_data->chkpt0);
3481 fileio_write_u32(&file, trace_data->chkpt1);
3482 fileio_write_u32(&file, trace_data->last_instruction);
3483 fileio_write_u32(&file, trace_data->depth);
3485 for (i = 0; i < trace_data->depth; i++)
3486 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3488 trace_data = trace_data->next;
3491 fileio_close(&file);
3493 return ERROR_OK;
3496 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3498 struct target *target = get_current_target(CMD_CTX);
3499 struct xscale_common *xscale = target_to_xscale(target);
3500 int retval;
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3506 xscale_analyze_trace(target, CMD_CTX);
3508 return ERROR_OK;
3511 COMMAND_HANDLER(xscale_handle_cp15)
3513 struct target *target = get_current_target(CMD_CTX);
3514 struct xscale_common *xscale = target_to_xscale(target);
3515 int retval;
3517 retval = xscale_verify_pointer(CMD_CTX, xscale);
3518 if (retval != ERROR_OK)
3519 return retval;
3521 if (target->state != TARGET_HALTED)
3523 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3524 return ERROR_OK;
3526 uint32_t reg_no = 0;
3527 struct reg *reg = NULL;
3528 if (CMD_ARGC > 0)
3530 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3531 /*translate from xscale cp15 register no to openocd register*/
3532 switch (reg_no)
3534 case 0:
3535 reg_no = XSCALE_MAINID;
3536 break;
3537 case 1:
3538 reg_no = XSCALE_CTRL;
3539 break;
3540 case 2:
3541 reg_no = XSCALE_TTB;
3542 break;
3543 case 3:
3544 reg_no = XSCALE_DAC;
3545 break;
3546 case 5:
3547 reg_no = XSCALE_FSR;
3548 break;
3549 case 6:
3550 reg_no = XSCALE_FAR;
3551 break;
3552 case 13:
3553 reg_no = XSCALE_PID;
3554 break;
3555 case 15:
3556 reg_no = XSCALE_CPACCESS;
3557 break;
3558 default:
3559 command_print(CMD_CTX, "invalid register number");
3560 return ERROR_INVALID_ARGUMENTS;
3562 reg = &xscale->reg_cache->reg_list[reg_no];
3565 if (CMD_ARGC == 1)
3567 uint32_t value;
3569 /* read cp15 control register */
3570 xscale_get_reg(reg);
3571 value = buf_get_u32(reg->value, 0, 32);
3572 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3574 else if (CMD_ARGC == 2)
3576 uint32_t value;
3577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3579 /* send CP write request (command 0x41) */
3580 xscale_send_u32(target, 0x41);
3582 /* send CP register number */
3583 xscale_send_u32(target, reg_no);
3585 /* send CP register value */
3586 xscale_send_u32(target, value);
3588 /* execute cpwait to ensure outstanding operations complete */
3589 xscale_send_u32(target, 0x53);
3591 else
3593 command_print(CMD_CTX, "usage: cp15 register [value]");
3596 return ERROR_OK;
3599 static const struct command_registration xscale_exec_command_handlers[] = {
3601 .name = "cache_info",
3602 .handler = xscale_handle_cache_info_command,
3603 .mode = COMMAND_EXEC,
3604 .help = "display information about CPU caches",
3607 .name = "mmu",
3608 .handler = xscale_handle_mmu_command,
3609 .mode = COMMAND_EXEC,
3610 .help = "enable or disable the MMU",
3611 .usage = "['enable'|'disable']",
3614 .name = "icache",
3615 .handler = xscale_handle_idcache_command,
3616 .mode = COMMAND_EXEC,
3617 .help = "display ICache state, optionally enabling or "
3618 "disabling it",
3619 .usage = "['enable'|'disable']",
3622 .name = "dcache",
3623 .handler = xscale_handle_idcache_command,
3624 .mode = COMMAND_EXEC,
3625 .help = "display DCache state, optionally enabling or "
3626 "disabling it",
3627 .usage = "['enable'|'disable']",
3630 .name = "vector_catch",
3631 .handler = xscale_handle_vector_catch_command,
3632 .mode = COMMAND_EXEC,
3633 .help = "set or display 8-bit mask of vectors "
3634 "that should trigger debug entry",
3635 .usage = "[mask]",
3638 .name = "vector_table",
3639 .handler = xscale_handle_vector_table_command,
3640 .mode = COMMAND_EXEC,
3641 .help = "set vector table entry in mini-ICache, "
3642 "or display current tables",
3643 .usage = "[('high'|'low') index code]",
3646 .name = "trace_buffer",
3647 .handler = xscale_handle_trace_buffer_command,
3648 .mode = COMMAND_EXEC,
3649 .help = "display trace buffer status, enable or disable "
3650 "tracing, and optionally reconfigure trace mode",
3651 .usage = "['enable'|'disable' ['fill' number|'wrap']]",
3654 .name = "dump_trace",
3655 .handler = xscale_handle_dump_trace_command,
3656 .mode = COMMAND_EXEC,
3657 .help = "dump content of trace buffer to file",
3658 .usage = "filename",
3661 .name = "analyze_trace",
3662 .handler = xscale_handle_analyze_trace_buffer_command,
3663 .mode = COMMAND_EXEC,
3664 .help = "analyze content of trace buffer",
3665 .usage = "",
3668 .name = "trace_image",
3669 .handler = xscale_handle_trace_image_command,
3670 .mode = COMMAND_EXEC,
3671 .help = "load image from file to address (default 0)",
3672 .usage = "filename [offset [filetype]]",
3675 .name = "cp15",
3676 .handler = xscale_handle_cp15,
3677 .mode = COMMAND_EXEC,
3678 .help = "Read or write coprocessor 15 register.",
3679 .usage = "register [value]",
3681 COMMAND_REGISTRATION_DONE
3683 static const struct command_registration xscale_any_command_handlers[] = {
3685 .name = "debug_handler",
3686 .handler = xscale_handle_debug_handler_command,
3687 .mode = COMMAND_ANY,
3688 .help = "Change address used for debug handler.",
3689 .usage = "target address",
3692 .name = "cache_clean_address",
3693 .handler = xscale_handle_cache_clean_address_command,
3694 .mode = COMMAND_ANY,
3695 .help = "Change address used for cleaning data cache.",
3696 .usage = "address",
3699 .chain = xscale_exec_command_handlers,
3701 COMMAND_REGISTRATION_DONE
3703 static const struct command_registration xscale_command_handlers[] = {
3705 .chain = arm_command_handlers,
3708 .name = "xscale",
3709 .mode = COMMAND_ANY,
3710 .help = "xscale command group",
3711 .chain = xscale_any_command_handlers,
3713 COMMAND_REGISTRATION_DONE
3716 struct target_type xscale_target =
3718 .name = "xscale",
3720 .poll = xscale_poll,
3721 .arch_state = xscale_arch_state,
3723 .target_request_data = NULL,
3725 .halt = xscale_halt,
3726 .resume = xscale_resume,
3727 .step = xscale_step,
3729 .assert_reset = xscale_assert_reset,
3730 .deassert_reset = xscale_deassert_reset,
3731 .soft_reset_halt = NULL,
3733 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3734 .get_gdb_reg_list = arm_get_gdb_reg_list,
3736 .read_memory = xscale_read_memory,
3737 .read_phys_memory = xscale_read_phys_memory,
3738 .write_memory = xscale_write_memory,
3739 .write_phys_memory = xscale_write_phys_memory,
3740 .bulk_write_memory = xscale_bulk_write_memory,
3742 .checksum_memory = arm_checksum_memory,
3743 .blank_check_memory = arm_blank_check_memory,
3745 .run_algorithm = armv4_5_run_algorithm,
3747 .add_breakpoint = xscale_add_breakpoint,
3748 .remove_breakpoint = xscale_remove_breakpoint,
3749 .add_watchpoint = xscale_add_watchpoint,
3750 .remove_watchpoint = xscale_remove_watchpoint,
3752 .commands = xscale_command_handlers,
3753 .target_create = xscale_target_create,
3754 .init_target = xscale_init_target,
3756 .virt2phys = xscale_virt2phys,
3757 .mmu = xscale_mmu