cleanup: rename armv4_5 to arm for readability
openocd/andreasf.git: src/target/xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
87 static char *const xscale_reg_list[] =
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
113 static const struct xscale_reg xscale_reg_arch_info[] =
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
142 uint8_t buf[4];
144 buf_set_u32(buf, 0, 32, value);
146 return xscale_set_reg(reg, buf);
149 static const char xscale_not[] = "target is not an XScale";
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
158 return ERROR_OK;
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
163 assert (tap != NULL);
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 struct scan_field field;
168 uint8_t scratch[4];
170 memset(&field, 0, sizeof field);
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(scratch, 0, field.num_bits, new_instr);
175 jtag_add_ir_scan(tap, &field, end_state);
178 return ERROR_OK;
181 static int xscale_read_dcsr(struct target *target)
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
193 xscale_jtag_set_instr(target->tap,
194 XSCALE_SELDCSR << xscale->xscale_variant,
195 TAP_DRPAUSE);
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
200 memset(&fields, 0, sizeof fields);
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
207 fields[1].num_bits = 32;
208 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
210 fields[2].num_bits = 1;
211 fields[2].out_value = &field2;
212 uint8_t tmp2;
213 fields[2].in_value = &tmp2;
215 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
217 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
218 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
220 if ((retval = jtag_execute_queue()) != ERROR_OK)
222 LOG_ERROR("JTAG error while reading DCSR");
223 return retval;
226 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
229 /* write the register with the value we just read
230 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
232 field0_check_mask = 0x1;
233 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
234 fields[1].in_value = NULL;
236 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
238 /* DANGER!!! this must be here. It will make sure that the arguments
239 * to jtag_set_check_value() do not go out of scope! */
240 return jtag_execute_queue();
244 static void xscale_getbuf(jtag_callback_data_t arg)
246 uint8_t *in = (uint8_t *)arg;
247 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
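/* Receive num_words 32-bit words from the debug handler via the DBGTX data
 * register. Each DR scan shifts three fields: a 3-bit handshake field
 * (bit 0 set means TX held valid data), the 32-bit data word, and a trailing
 * single bit. Words whose valid bit was clear are compacted out of the result
 * arrays and re-read on the next pass of the outer loop below. */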
250 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 if (num_words == 0)
253 return ERROR_COMMAND_SYNTAX_ERROR;
255 struct xscale_common *xscale = target_to_xscale(target);
256 int retval = ERROR_OK;
257 tap_state_t path[3];
258 struct scan_field fields[3];
259 uint8_t *field0 = malloc(num_words * 1);
260 uint8_t field0_check_value = 0x2;
261 uint8_t field0_check_mask = 0x6;
262 uint32_t *field1 = malloc(num_words * 4);
263 uint8_t field2_check_value = 0x0;
264 uint8_t field2_check_mask = 0x1;
265 int words_done = 0;
266 int words_scheduled = 0;
267 int i;
269 path[0] = TAP_DRSELECT;
270 path[1] = TAP_DRCAPTURE;
271 path[2] = TAP_DRSHIFT;
273 memset(&fields, 0, sizeof fields);
275 fields[0].num_bits = 3;
276 uint8_t tmp;
277 fields[0].in_value = &tmp;
278 fields[0].check_value = &field0_check_value;
279 fields[0].check_mask = &field0_check_mask;
281 fields[1].num_bits = 32;
283 fields[2].num_bits = 1;
284 uint8_t tmp2;
285 fields[2].in_value = &tmp2;
286 fields[2].check_value = &field2_check_value;
287 fields[2].check_mask = &field2_check_mask;
289 xscale_jtag_set_instr(target->tap,
290 XSCALE_DBGTX << xscale->xscale_variant,
291 TAP_IDLE);
292 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294 /* repeat until all words have been collected */
295 int attempts = 0;
296 while (words_done < num_words)
298 /* schedule reads */
299 words_scheduled = 0;
300 for (i = words_done; i < num_words; i++)
302 fields[0].in_value = &field0[i];
304 jtag_add_pathmove(3, path);
306 fields[1].in_value = (uint8_t *)(field1 + i);
308 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
310 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312 words_scheduled++;
315 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 LOG_ERROR("JTAG error while receiving data from debug handler");
318 break;
321 /* examine results */
322 for (i = words_done; i < num_words; i++)
324 if (!(field0[i] & 1))
326 /* move backwards if necessary */
327 int j;
328 for (j = i; j < num_words - 1; j++)
330 field0[j] = field0[j + 1];
331 field1[j] = field1[j + 1];
333 words_scheduled--;
336 if (words_scheduled == 0)
338 if (attempts++ == 1000)
340 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
341 retval = ERROR_TARGET_TIMEOUT;
342 break;
346 words_done += words_scheduled;
349 for (i = 0; i < num_words; i++)
350 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352 free(field1);
354 return retval;
357 static int xscale_read_tx(struct target *target, int consume)
359 struct xscale_common *xscale = target_to_xscale(target);
360 tap_state_t path[3];
361 tap_state_t noconsume_path[6];
362 int retval;
363 struct timeval timeout, now;
364 struct scan_field fields[3];
365 uint8_t field0_in = 0x0;
366 uint8_t field0_check_value = 0x2;
367 uint8_t field0_check_mask = 0x6;
368 uint8_t field2_check_value = 0x0;
369 uint8_t field2_check_mask = 0x1;
371 xscale_jtag_set_instr(target->tap,
372 XSCALE_DBGTX << xscale->xscale_variant,
373 TAP_IDLE);
375 path[0] = TAP_DRSELECT;
376 path[1] = TAP_DRCAPTURE;
377 path[2] = TAP_DRSHIFT;
379 noconsume_path[0] = TAP_DRSELECT;
380 noconsume_path[1] = TAP_DRCAPTURE;
381 noconsume_path[2] = TAP_DREXIT1;
382 noconsume_path[3] = TAP_DRPAUSE;
383 noconsume_path[4] = TAP_DREXIT2;
384 noconsume_path[5] = TAP_DRSHIFT;
386 memset(&fields, 0, sizeof fields);
388 fields[0].num_bits = 3;
389 fields[0].in_value = &field0_in;
391 fields[1].num_bits = 32;
392 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
394 fields[2].num_bits = 1;
395 uint8_t tmp;
396 fields[2].in_value = &tmp;
398 gettimeofday(&timeout, NULL);
399 timeval_add_time(&timeout, 1, 0);
401 for (;;)
403 /* if we want to consume the register content (i.e. clear TX_READY),
404 * we have to go straight from Capture-DR to Shift-DR
405 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
407 if (consume)
408 jtag_add_pathmove(3, path);
409 else
411 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
414 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
416 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
417 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
419 if ((retval = jtag_execute_queue()) != ERROR_OK)
421 LOG_ERROR("JTAG error while reading TX");
422 return ERROR_TARGET_TIMEOUT;
425 gettimeofday(&now, NULL);
426 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
428 LOG_ERROR("time out reading TX register");
429 return ERROR_TARGET_TIMEOUT;
431 if (!((!(field0_in & 1)) && consume))
433 goto done;
435 if (debug_level >= 3)
437 LOG_DEBUG("waiting 100ms");
438 alive_sleep(100); /* avoid flooding the logs */
439 } else
441 keep_alive();
444 done:
446 if (!(field0_in & 1))
447 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
449 return ERROR_OK;
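/* Writing the handler's RX register uses the same 36-bit DR layout as TX:
 * 3 handshake bits, 32 data bits, and a final rx_valid bit. The loop below
 * waits until the handler has drained the previous word (handshake bit 0
 * low) before setting rx_valid for the new value; a one-second timeout
 * guards against a wedged handler. */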
452 static int xscale_write_rx(struct target *target)
454 struct xscale_common *xscale = target_to_xscale(target);
455 int retval;
456 struct timeval timeout, now;
457 struct scan_field fields[3];
458 uint8_t field0_out = 0x0;
459 uint8_t field0_in = 0x0;
460 uint8_t field0_check_value = 0x2;
461 uint8_t field0_check_mask = 0x6;
462 uint8_t field2 = 0x0;
463 uint8_t field2_check_value = 0x0;
464 uint8_t field2_check_mask = 0x1;
466 xscale_jtag_set_instr(target->tap,
467 XSCALE_DBGRX << xscale->xscale_variant,
468 TAP_IDLE);
470 memset(&fields, 0, sizeof fields);
472 fields[0].num_bits = 3;
473 fields[0].out_value = &field0_out;
474 fields[0].in_value = &field0_in;
476 fields[1].num_bits = 32;
477 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
479 fields[2].num_bits = 1;
480 fields[2].out_value = &field2;
481 uint8_t tmp;
482 fields[2].in_value = &tmp;
484 gettimeofday(&timeout, NULL);
485 timeval_add_time(&timeout, 1, 0);
487 /* poll until rx_read is low */
488 LOG_DEBUG("polling RX");
489 for (;;)
491 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
493 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
494 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
496 if ((retval = jtag_execute_queue()) != ERROR_OK)
498 LOG_ERROR("JTAG error while writing RX");
499 return retval;
502 gettimeofday(&now, NULL);
503 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
505 LOG_ERROR("time out writing RX register");
506 return ERROR_TARGET_TIMEOUT;
508 if (!(field0_in & 1))
509 goto done;
510 if (debug_level >= 3)
512 LOG_DEBUG("waiting 100ms");
513 alive_sleep(100); /* avoid flooding the logs */
514 } else
516 keep_alive();
519 done:
521 /* set rx_valid */
522 field2 = 0x1;
523 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
525 if ((retval = jtag_execute_queue()) != ERROR_OK)
527 LOG_ERROR("JTAG error while writing RX");
528 return retval;
531 return ERROR_OK;
534 /* send count elements of the given size (in bytes) to the debug handler */
535 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
537 struct xscale_common *xscale = target_to_xscale(target);
538 uint32_t t[3];
539 int bits[3];
540 int retval;
541 int done_count = 0;
543 xscale_jtag_set_instr(target->tap,
544 XSCALE_DBGRX << xscale->xscale_variant,
545 TAP_IDLE);
547 bits[0]=3;
548 t[0]=0;
549 bits[1]=32;
550 t[2]=1;
551 bits[2]=1;
552 int endianness = target->endianness;
553 while (done_count++ < count)
555 switch (size)
557 case 4:
558 if (endianness == TARGET_LITTLE_ENDIAN)
560 t[1]=le_to_h_u32(buffer);
561 } else
563 t[1]=be_to_h_u32(buffer);
565 break;
566 case 2:
567 if (endianness == TARGET_LITTLE_ENDIAN)
569 t[1]=le_to_h_u16(buffer);
570 } else
572 t[1]=be_to_h_u16(buffer);
574 break;
575 case 1:
576 t[1]=buffer[0];
577 break;
578 default:
579 LOG_ERROR("BUG: size neither 4, 2 nor 1");
580 return ERROR_COMMAND_SYNTAX_ERROR;
582 jtag_add_dr_out(target->tap,
583 3,
584 bits,
585 t,
586 TAP_IDLE);
587 buffer += size;
590 if ((retval = jtag_execute_queue()) != ERROR_OK)
592 LOG_ERROR("JTAG error while sending data to debug handler");
593 return retval;
596 return ERROR_OK;
599 static int xscale_send_u32(struct target *target, uint32_t value)
601 struct xscale_common *xscale = target_to_xscale(target);
603 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
604 return xscale_write_rx(target);
607 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
609 struct xscale_common *xscale = target_to_xscale(target);
610 int retval;
611 struct scan_field fields[3];
612 uint8_t field0 = 0x0;
613 uint8_t field0_check_value = 0x2;
614 uint8_t field0_check_mask = 0x7;
615 uint8_t field2 = 0x0;
616 uint8_t field2_check_value = 0x0;
617 uint8_t field2_check_mask = 0x1;
619 if (hold_rst != -1)
620 xscale->hold_rst = hold_rst;
622 if (ext_dbg_brk != -1)
623 xscale->external_debug_break = ext_dbg_brk;
625 xscale_jtag_set_instr(target->tap,
626 XSCALE_SELDCSR << xscale->xscale_variant,
627 TAP_IDLE);
629 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
630 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
632 memset(&fields, 0, sizeof fields);
634 fields[0].num_bits = 3;
635 fields[0].out_value = &field0;
636 uint8_t tmp;
637 fields[0].in_value = &tmp;
639 fields[1].num_bits = 32;
640 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
642 fields[2].num_bits = 1;
643 fields[2].out_value = &field2;
644 uint8_t tmp2;
645 fields[2].in_value = &tmp2;
647 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
649 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
650 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
652 if ((retval = jtag_execute_queue()) != ERROR_OK)
654 LOG_ERROR("JTAG error while writing DCSR");
655 return retval;
658 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
659 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
661 return ERROR_OK;
664 /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
665 static unsigned int parity (unsigned int v)
667 // unsigned int ov = v;
668 v ^= v >> 16;
669 v ^= v >> 8;
670 v ^= v >> 4;
671 v &= 0xf;
672 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
673 return (0x6996 >> v) & 1;
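/* 0x6996 is a 16-entry parity lookup table: bit n of 0x6996 is the parity of
 * the 4-bit value n, so folding v down to a nibble and indexing the table
 * yields the parity of the whole 32-bit word. */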
676 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
678 struct xscale_common *xscale = target_to_xscale(target);
679 uint8_t packet[4];
680 uint8_t cmd;
681 int word;
682 struct scan_field fields[2];
684 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
686 /* LDIC into IR */
687 xscale_jtag_set_instr(target->tap,
688 XSCALE_LDIC << xscale->xscale_variant,
689 TAP_IDLE);
691 /* CMD is b011 to load a cacheline into the Mini ICache.
692 * Loading into the main ICache is deprecated, and unused.
693 * It's followed by three zero bits, and 27 address bits.
695 buf_set_u32(&cmd, 0, 6, 0x3);
697 /* virtual address of desired cache line */
698 buf_set_u32(packet, 0, 27, va >> 5);
700 memset(&fields, 0, sizeof fields);
702 fields[0].num_bits = 6;
703 fields[0].out_value = &cmd;
705 fields[1].num_bits = 27;
706 fields[1].out_value = packet;
708 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
710 /* rest of packet is a cacheline: 8 instructions, with parity */
711 fields[0].num_bits = 32;
712 fields[0].out_value = packet;
714 fields[1].num_bits = 1;
715 fields[1].out_value = &cmd;
717 for (word = 0; word < 8; word++)
719 buf_set_u32(packet, 0, 32, buffer[word]);
721 uint32_t value;
722 memcpy(&value, packet, sizeof(uint32_t));
723 cmd = parity(value);
725 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
728 return jtag_execute_queue();
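/* Note: each mini-ICache line load is one 33-bit scan selecting the operation
 * (6-bit command) and cache line (27-bit VA >> 5), followed by eight 33-bit
 * scans each carrying one instruction word plus its parity bit. */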
731 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
733 struct xscale_common *xscale = target_to_xscale(target);
734 uint8_t packet[4];
735 uint8_t cmd;
736 struct scan_field fields[2];
738 xscale_jtag_set_instr(target->tap,
739 XSCALE_LDIC << xscale->xscale_variant,
740 TAP_IDLE);
742 /* CMD for invalidate IC line b000, bits [6:4] b000 */
743 buf_set_u32(&cmd, 0, 6, 0x0);
745 /* virtual address of desired cache line */
746 buf_set_u32(packet, 0, 27, va >> 5);
748 memset(&fields, 0, sizeof fields);
750 fields[0].num_bits = 6;
751 fields[0].out_value = &cmd;
753 fields[1].num_bits = 27;
754 fields[1].out_value = packet;
756 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
758 return ERROR_OK;
761 static int xscale_update_vectors(struct target *target)
763 struct xscale_common *xscale = target_to_xscale(target);
764 int i;
765 int retval;
767 uint32_t low_reset_branch, high_reset_branch;
769 for (i = 1; i < 8; i++)
771 /* if there's a static vector specified for this exception, override */
772 if (xscale->static_high_vectors_set & (1 << i))
774 xscale->high_vectors[i] = xscale->static_high_vectors[i];
776 else
778 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
779 if (retval == ERROR_TARGET_TIMEOUT)
780 return retval;
781 if (retval != ERROR_OK)
783 /* Some of these reads will fail as part of normal execution */
784 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
789 for (i = 1; i < 8; i++)
791 if (xscale->static_low_vectors_set & (1 << i))
793 xscale->low_vectors[i] = xscale->static_low_vectors[i];
795 else
797 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
798 if (retval == ERROR_TARGET_TIMEOUT)
799 return retval;
800 if (retval != ERROR_OK)
802 /* Some of these reads will fail as part of normal execution */
803 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
808 /* calculate branches to debug handler */
809 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
810 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
812 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
813 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
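/* ARMV4_5_B(0xfffffe, 0) encodes "b ." (a branch-to-self), used for vectors
 * that could not be read from target memory; vector 0 instead branches into
 * the debug handler entry at handler_address + 0x20. */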
815 /* invalidate and load exception vectors in mini i-cache */
816 xscale_invalidate_ic_line(target, 0x0);
817 xscale_invalidate_ic_line(target, 0xffff0000);
819 xscale_load_ic(target, 0x0, xscale->low_vectors);
820 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
822 return ERROR_OK;
825 static int xscale_arch_state(struct target *target)
827 struct xscale_common *xscale = target_to_xscale(target);
828 struct arm *arm = &xscale->arm;
830 static const char *state[] =
832 "disabled", "enabled"
835 static const char *arch_dbg_reason[] =
837 "", "\n(processor reset)", "\n(trace buffer full)"
840 if (arm->common_magic != ARM_COMMON_MAGIC)
842 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
843 return ERROR_COMMAND_SYNTAX_ERROR;
846 arm_arch_state(target);
847 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
848 state[xscale->armv4_5_mmu.mmu_enabled],
849 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
850 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
851 arch_dbg_reason[xscale->arch_debug_reason]);
853 return ERROR_OK;
856 static int xscale_poll(struct target *target)
858 int retval = ERROR_OK;
860 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
862 enum target_state previous_state = target->state;
863 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
866 /* there's data to read from the tx register, we entered debug state */
867 target->state = TARGET_HALTED;
869 /* process debug entry, fetching current mode regs */
870 retval = xscale_debug_entry(target);
872 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
874 LOG_USER("error while polling TX register, reset CPU");
875 /* here we "lie" so GDB won't get stuck and a reset can be performed */
876 target->state = TARGET_HALTED;
879 /* debug_entry could have overwritten target state (i.e. immediate resume)
880 * don't signal event handlers in that case
882 if (target->state != TARGET_HALTED)
883 return ERROR_OK;
885 /* if target was running, signal that we halted
886 * otherwise we reentered from debug execution */
887 if (previous_state == TARGET_RUNNING)
888 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
889 else
890 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
893 return retval;
896 static int xscale_debug_entry(struct target *target)
898 struct xscale_common *xscale = target_to_xscale(target);
899 struct arm *arm = &xscale->arm;
900 uint32_t pc;
901 uint32_t buffer[10];
902 unsigned i;
903 int retval;
904 uint32_t moe;
906 /* clear external dbg break (will be written on next DCSR read) */
907 xscale->external_debug_break = 0;
908 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
909 return retval;
911 /* get r0, pc, r1 to r7 and cpsr */
912 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
913 return retval;
915 /* move r0 from buffer to register cache */
916 buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
917 arm->core_cache->reg_list[0].dirty = 1;
918 arm->core_cache->reg_list[0].valid = 1;
919 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
921 /* move pc from buffer to register cache */
922 buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
923 arm->pc->dirty = 1;
924 arm->pc->valid = 1;
925 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
927 /* move data from buffer to register cache */
928 for (i = 1; i <= 7; i++)
930 buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
931 arm->core_cache->reg_list[i].dirty = 1;
932 arm->core_cache->reg_list[i].valid = 1;
933 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
936 arm_set_cpsr(arm, buffer[9]);
937 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
939 if (!is_arm_mode(arm->core_mode))
941 target->state = TARGET_UNKNOWN;
942 LOG_ERROR("cpsr contains invalid mode value - communication failure");
943 return ERROR_TARGET_FAILURE;
945 LOG_DEBUG("target entered debug state in %s mode",
946 arm_mode_name(arm->core_mode));
948 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
949 if (arm->spsr) {
950 xscale_receive(target, buffer, 8);
951 buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
952 arm->spsr->dirty = false;
953 arm->spsr->valid = true;
955 else
957 /* r8 to r14, but no spsr */
958 xscale_receive(target, buffer, 7);
961 /* move data from buffer to right banked register in cache */
962 for (i = 8; i <= 14; i++)
964 struct reg *r = arm_reg_current(arm, i);
966 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
967 r->dirty = false;
968 r->valid = true;
971 /* mark xscale regs invalid to ensure they are retrieved from the
972 * debug handler if requested */
973 for (i = 0; i < xscale->reg_cache->num_regs; i++)
974 xscale->reg_cache->reg_list[i].valid = 0;
976 /* examine debug reason */
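/* The three-bit "method of entry" field sits in DCSR bits [4:2]; the switch
 * below maps each MOE value to an OpenOCD debug reason and PC fixup. */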
977 xscale_read_dcsr(target);
978 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
980 /* stored PC (for calculating fixup) */
981 pc = buf_get_u32(arm->pc->value, 0, 32);
983 switch (moe)
985 case 0x0: /* Processor reset */
986 target->debug_reason = DBG_REASON_DBGRQ;
987 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
988 pc -= 4;
989 break;
990 case 0x1: /* Instruction breakpoint hit */
991 target->debug_reason = DBG_REASON_BREAKPOINT;
992 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
993 pc -= 4;
994 break;
995 case 0x2: /* Data breakpoint hit */
996 target->debug_reason = DBG_REASON_WATCHPOINT;
997 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
998 pc -= 4;
999 break;
1000 case 0x3: /* BKPT instruction executed */
1001 target->debug_reason = DBG_REASON_BREAKPOINT;
1002 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1003 pc -= 4;
1004 break;
1005 case 0x4: /* Ext. debug event */
1006 target->debug_reason = DBG_REASON_DBGRQ;
1007 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1008 pc -= 4;
1009 break;
1010 case 0x5: /* Vector trap occurred */
1011 target->debug_reason = DBG_REASON_BREAKPOINT;
1012 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1013 pc -= 4;
1014 break;
1015 case 0x6: /* Trace buffer full break */
1016 target->debug_reason = DBG_REASON_DBGRQ;
1017 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1018 pc -= 4;
1019 break;
1020 case 0x7: /* Reserved (may flag Hot-Debug support) */
1021 default:
1022 LOG_ERROR("Method of Entry is 'Reserved'");
1023 exit(-1);
1024 break;
1027 /* apply PC fixup */
1028 buf_set_u32(arm->pc->value, 0, 32, pc);
1030 /* on the first debug entry, identify cache type */
1031 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1033 uint32_t cache_type_reg;
1035 /* read cp15 cache type register */
1036 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1037 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1039 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1042 /* examine MMU and Cache settings */
1043 /* read cp15 control register */
1044 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1045 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1046 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1047 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1048 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1050 /* tracing enabled, read collected trace data */
1051 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1053 xscale_read_trace(target);
1055 /* Resume if entered debug due to buffer fill and we're still collecting
1056 * trace data. Note that a debug exception due to trace buffer full
1057 * can only happen in fill mode. */
1058 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1060 if (--xscale->trace.fill_counter > 0)
1061 xscale_resume(target, 1, 0x0, 1, 0);
1063 else /* entered debug for other reason; reset counter */
1064 xscale->trace.fill_counter = 0;
1067 return ERROR_OK;
1070 static int xscale_halt(struct target *target)
1072 struct xscale_common *xscale = target_to_xscale(target);
1074 LOG_DEBUG("target->state: %s",
1075 target_state_name(target));
1077 if (target->state == TARGET_HALTED)
1079 LOG_DEBUG("target was already halted");
1080 return ERROR_OK;
1082 else if (target->state == TARGET_UNKNOWN)
1084 /* this must not happen for an XScale target */
1085 LOG_ERROR("target was in unknown state when halt was requested");
1086 return ERROR_TARGET_INVALID;
1088 else if (target->state == TARGET_RESET)
1090 LOG_DEBUG("target->state == TARGET_RESET");
1092 else
1094 /* assert external dbg break */
1095 xscale->external_debug_break = 1;
1096 xscale_read_dcsr(target);
1098 target->debug_reason = DBG_REASON_DBGRQ;
1101 return ERROR_OK;
1104 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1106 struct xscale_common *xscale = target_to_xscale(target);
1107 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1108 int retval;
1110 if (xscale->ibcr0_used)
1112 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1114 if (ibcr0_bp)
1116 xscale_unset_breakpoint(target, ibcr0_bp);
1118 else
1120 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1121 exit(-1);
1125 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1126 return retval;
1128 return ERROR_OK;
1131 static int xscale_disable_single_step(struct target *target)
1133 struct xscale_common *xscale = target_to_xscale(target);
1134 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1135 int retval;
1137 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1138 return retval;
1140 return ERROR_OK;
1143 static void xscale_enable_watchpoints(struct target *target)
1145 struct watchpoint *watchpoint = target->watchpoints;
1147 while (watchpoint)
1149 if (watchpoint->set == 0)
1150 xscale_set_watchpoint(target, watchpoint);
1151 watchpoint = watchpoint->next;
1155 static void xscale_enable_breakpoints(struct target *target)
1157 struct breakpoint *breakpoint = target->breakpoints;
1159 /* set any pending breakpoints */
1160 while (breakpoint)
1162 if (breakpoint->set == 0)
1163 xscale_set_breakpoint(target, breakpoint);
1164 breakpoint = breakpoint->next;
1168 static void xscale_free_trace_data(struct xscale_common *xscale)
1170 struct xscale_trace_data *td = xscale->trace.data;
1171 while (td)
1173 struct xscale_trace_data *next_td = td->next;
1174 if (td->entries)
1175 free(td->entries);
1176 free(td);
1177 td = next_td;
1179 xscale->trace.data = NULL;
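/* Resume protocol, as implemented below: flush dirty banked registers to the
 * debug handler, optionally step over a breakpoint at the current PC, then
 * send command 0x30 (resume) or, with tracing enabled, 0x62 followed by 0x31
 * (clean trace buffer, resume with trace), and finally the CPSR, r7 down to
 * r0, and the PC. */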
1182 static int xscale_resume(struct target *target, int current,
1183 uint32_t address, int handle_breakpoints, int debug_execution)
1185 struct xscale_common *xscale = target_to_xscale(target);
1186 struct arm *arm = &xscale->arm;
1187 uint32_t current_pc;
1188 int retval;
1189 int i;
1191 LOG_DEBUG("-");
1193 if (target->state != TARGET_HALTED)
1195 LOG_WARNING("target not halted");
1196 return ERROR_TARGET_NOT_HALTED;
1199 if (!debug_execution)
1201 target_free_all_working_areas(target);
1204 /* update vector tables */
1205 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1206 return retval;
1208 /* current = 1: continue on current pc, otherwise continue at <address> */
1209 if (!current)
1210 buf_set_u32(arm->pc->value, 0, 32, address);
1212 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1214 /* if we're at the reset vector, we have to simulate the branch */
1215 if (current_pc == 0x0)
1217 arm_simulate_step(target, NULL);
1218 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1221 /* the front-end may request us not to handle breakpoints */
1222 if (handle_breakpoints)
1224 struct breakpoint *breakpoint;
1225 breakpoint = breakpoint_find(target,
1226 buf_get_u32(arm->pc->value, 0, 32));
1227 if (breakpoint != NULL)
1229 uint32_t next_pc;
1230 enum trace_mode saved_trace_mode;
1232 /* there's a breakpoint at the current PC, we have to step over it */
1233 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1234 xscale_unset_breakpoint(target, breakpoint);
1236 /* calculate PC of next instruction */
1237 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1239 uint32_t current_opcode;
1240 target_read_u32(target, current_pc, &current_opcode);
1241 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1244 LOG_DEBUG("enable single-step");
1245 xscale_enable_single_step(target, next_pc);
1247 /* restore banked registers */
1248 retval = xscale_restore_banked(target);
1249 if (retval != ERROR_OK)
1250 return retval;
1252 /* send resume request */
1253 xscale_send_u32(target, 0x30);
1255 /* send CPSR */
1256 xscale_send_u32(target,
1257 buf_get_u32(arm->cpsr->value, 0, 32));
1258 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1259 buf_get_u32(arm->cpsr->value, 0, 32));
1261 for (i = 7; i >= 0; i--)
1263 /* send register */
1264 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1265 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1266 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1269 /* send PC */
1270 xscale_send_u32(target,
1271 buf_get_u32(arm->pc->value, 0, 32));
1272 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1273 buf_get_u32(arm->pc->value, 0, 32));
1275 /* disable trace data collection in xscale_debug_entry() */
1276 saved_trace_mode = xscale->trace.mode;
1277 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1279 /* wait for and process debug entry */
1280 xscale_debug_entry(target);
1282 /* re-enable trace buffer, if enabled previously */
1283 xscale->trace.mode = saved_trace_mode;
1285 LOG_DEBUG("disable single-step");
1286 xscale_disable_single_step(target);
1288 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1289 xscale_set_breakpoint(target, breakpoint);
1293 /* enable any pending breakpoints and watchpoints */
1294 xscale_enable_breakpoints(target);
1295 xscale_enable_watchpoints(target);
1297 /* restore banked registers */
1298 retval = xscale_restore_banked(target);
1299 if (retval != ERROR_OK)
1300 return retval;
1302 /* send resume request (command 0x30 or 0x31)
1303 * clean the trace buffer if it is to be enabled (0x62) */
1304 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1306 if (xscale->trace.mode == XSCALE_TRACE_FILL)
1308 /* If trace enabled in fill mode and starting collection of new set
1309 * of buffers, initialize buffer counter and free previous buffers */
1310 if (xscale->trace.fill_counter == 0)
1312 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1313 xscale_free_trace_data(xscale);
1316 else /* wrap mode; free previous buffer */
1317 xscale_free_trace_data(xscale);
1319 xscale_send_u32(target, 0x62);
1320 xscale_send_u32(target, 0x31);
1322 else
1323 xscale_send_u32(target, 0x30);
1325 /* send CPSR */
1326 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1327 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1328 buf_get_u32(arm->cpsr->value, 0, 32));
1330 for (i = 7; i >= 0; i--)
1332 /* send register */
1333 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1334 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1335 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1338 /* send PC */
1339 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1340 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1341 buf_get_u32(arm->pc->value, 0, 32));
1343 target->debug_reason = DBG_REASON_NOTHALTED;
1345 if (!debug_execution)
1347 /* registers are now invalid */
1348 register_cache_invalidate(arm->core_cache);
1349 target->state = TARGET_RUNNING;
1350 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1352 else
1354 target->state = TARGET_DEBUG_RUNNING;
1355 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1358 LOG_DEBUG("target resumed");
1360 return ERROR_OK;
1363 static int xscale_step_inner(struct target *target, int current,
1364 uint32_t address, int handle_breakpoints)
1366 struct xscale_common *xscale = target_to_xscale(target);
1367 struct arm *arm = &xscale->arm;
1368 uint32_t next_pc;
1369 int retval;
1370 int i;
1372 target->debug_reason = DBG_REASON_SINGLESTEP;
1374 /* calculate PC of next instruction */
1375 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1377 uint32_t current_opcode, current_pc;
1378 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1380 target_read_u32(target, current_pc, &current_opcode);
1381 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1382 return retval;
1385 LOG_DEBUG("enable single-step");
1386 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1387 return retval;
1389 /* restore banked registers */
1390 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1391 return retval;
1393 /* send resume request (command 0x30 or 0x31)
1394 * clean the trace buffer if it is to be enabled (0x62) */
1395 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1397 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1398 return retval;
1399 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1400 return retval;
1402 else
1403 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1404 return retval;
1406 /* send CPSR */
1407 retval = xscale_send_u32(target,
1408 buf_get_u32(arm->cpsr->value, 0, 32));
1409 if (retval != ERROR_OK)
1410 return retval;
1411 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1412 buf_get_u32(arm->cpsr->value, 0, 32));
1414 for (i = 7; i >= 0; i--) {
1415 /* send register */
1416 retval = xscale_send_u32(target,
1417 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1418 if (retval != ERROR_OK)
1419 return retval;
1420 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
1421 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1424 /* send PC */
1425 retval = xscale_send_u32(target,
1426 buf_get_u32(arm->pc->value, 0, 32));
1427 if (retval != ERROR_OK)
1428 return retval;
1429 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1430 buf_get_u32(arm->pc->value, 0, 32));
1432 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1434 /* registers are now invalid */
1435 register_cache_invalidate(arm->core_cache);
1437 /* wait for and process debug entry */
1438 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1439 return retval;
1441 LOG_DEBUG("disable single-step");
1442 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1443 return retval;
1445 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1447 return ERROR_OK;
1450 static int xscale_step(struct target *target, int current,
1451 uint32_t address, int handle_breakpoints)
1453 struct arm *arm = target_to_arm(target);
1454 struct breakpoint *breakpoint = NULL;
1456 uint32_t current_pc;
1457 int retval;
1459 if (target->state != TARGET_HALTED)
1461 LOG_WARNING("target not halted");
1462 return ERROR_TARGET_NOT_HALTED;
1465 /* current = 1: continue on current pc, otherwise continue at <address> */
1466 if (!current)
1467 buf_set_u32(arm->pc->value, 0, 32, address);
1469 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1471 /* if we're at the reset vector, we have to simulate the step */
1472 if (current_pc == 0x0)
1474 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1475 return retval;
1476 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1477 LOG_DEBUG("current pc %" PRIx32, current_pc);
1479 target->debug_reason = DBG_REASON_SINGLESTEP;
1480 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1482 return ERROR_OK;
1485 /* the front-end may request us not to handle breakpoints */
1486 if (handle_breakpoints)
1487 breakpoint = breakpoint_find(target,
1488 buf_get_u32(arm->pc->value, 0, 32));
1489 if (breakpoint != NULL) {
1490 retval = xscale_unset_breakpoint(target, breakpoint);
1491 if (retval != ERROR_OK)
1492 return retval;
1495 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1496 if (retval != ERROR_OK)
1497 return retval;
1499 if (breakpoint)
1501 xscale_set_breakpoint(target, breakpoint);
1504 LOG_DEBUG("target stepped");
1506 return ERROR_OK;
1510 static int xscale_assert_reset(struct target *target)
1512 struct xscale_common *xscale = target_to_xscale(target);
1514 LOG_DEBUG("target->state: %s",
1515 target_state_name(target));
1517 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1518 * end up in T-L-R, which would reset JTAG
1520 xscale_jtag_set_instr(target->tap,
1521 XSCALE_SELDCSR << xscale->xscale_variant,
1522 TAP_IDLE);
1524 /* set Hold reset, Halt mode and Trap Reset */
1525 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1526 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1527 xscale_write_dcsr(target, 1, 0);
1529 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1530 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1531 jtag_execute_queue();
1533 /* assert reset */
1534 jtag_add_reset(0, 1);
1536 /* sleep 1ms, to be sure we fulfill any requirements */
1537 jtag_add_sleep(1000);
1538 jtag_execute_queue();
1540 target->state = TARGET_RESET;
1542 if (target->reset_halt)
1544 int retval;
1545 if ((retval = target_halt(target)) != ERROR_OK)
1546 return retval;
1549 return ERROR_OK;
1552 static int xscale_deassert_reset(struct target *target)
1554 struct xscale_common *xscale = target_to_xscale(target);
1555 struct breakpoint *breakpoint = target->breakpoints;
1557 LOG_DEBUG("-");
1559 xscale->ibcr_available = 2;
1560 xscale->ibcr0_used = 0;
1561 xscale->ibcr1_used = 0;
1563 xscale->dbr_available = 2;
1564 xscale->dbr0_used = 0;
1565 xscale->dbr1_used = 0;
1567 /* mark all hardware breakpoints as unset */
1568 while (breakpoint)
1570 if (breakpoint->type == BKPT_HARD)
1572 breakpoint->set = 0;
1574 breakpoint = breakpoint->next;
1577 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1578 xscale_free_trace_data(xscale);
1580 register_cache_invalidate(xscale->arm.core_cache);
1582 /* FIXME: mark hardware watchpoints as unset too. Also,
1583 * at least some of the XScale registers are invalid...
1587 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1588 * contents got invalidated. Safer to force that, so writing new
1589 * contents can't ever fail.
1592 uint32_t address;
1593 unsigned buf_cnt;
1594 const uint8_t *buffer = xscale_debug_handler;
1595 int retval;
1597 /* release SRST */
1598 jtag_add_reset(0, 0);
1600 /* wait 300ms; 150 and 100ms were not enough */
1601 jtag_add_sleep(300*1000);
1603 jtag_add_runtest(2030, TAP_IDLE);
1604 jtag_execute_queue();
1606 /* set Hold reset, Halt mode and Trap Reset */
1607 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1608 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1609 xscale_write_dcsr(target, 1, 0);
1611 /* Load the debug handler into the mini-icache. Since
1612 * it's using halt mode (not monitor mode), it runs in
1613 * "Special Debug State" for access to registers, memory,
1614 * coprocessors, trace data, etc.
1616 address = xscale->handler_address;
1617 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1618 binary_size > 0;
1619 binary_size -= buf_cnt, buffer += buf_cnt)
1621 uint32_t cache_line[8];
1622 unsigned i;
1624 buf_cnt = binary_size;
1625 if (buf_cnt > 32)
1626 buf_cnt = 32;
1628 for (i = 0; i < buf_cnt; i += 4)
1630 /* convert LE buffer to host-endian uint32_t */
1631 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1634 for (; i < 32; i += 4)
1636 cache_line[i / 4] = 0xe1a08008;
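/* 0xe1a08008 is "mov r8, r8", used as a NOP to pad out the remainder of the
 * 32-byte cache line. */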
1639 /* only load addresses other than the reset vectors */
1640 if ((address % 0x400) != 0x0)
1642 retval = xscale_load_ic(target, address,
1643 cache_line);
1644 if (retval != ERROR_OK)
1645 return retval;
1648 address += buf_cnt;
1651 retval = xscale_load_ic(target, 0x0,
1652 xscale->low_vectors);
1653 if (retval != ERROR_OK)
1654 return retval;
1655 retval = xscale_load_ic(target, 0xffff0000,
1656 xscale->high_vectors);
1657 if (retval != ERROR_OK)
1658 return retval;
1660 jtag_add_runtest(30, TAP_IDLE);
1662 jtag_add_sleep(100000);
1664 /* set Hold reset, Halt mode and Trap Reset */
1665 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1666 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1667 xscale_write_dcsr(target, 1, 0);
1669 /* clear Hold reset to let the target run (should enter debug handler) */
1670 xscale_write_dcsr(target, 0, 1);
1671 target->state = TARGET_RUNNING;
1673 if (!target->reset_halt)
1675 jtag_add_sleep(10000);
1677 /* we should have entered debug now */
1678 xscale_debug_entry(target);
1679 target->state = TARGET_HALTED;
1681 /* resume the target */
1682 xscale_resume(target, 1, 0x0, 1, 0);
1686 return ERROR_OK;
1689 static int xscale_read_core_reg(struct target *target, struct reg *r,
1690 int num, enum arm_mode mode)
1692 /** \todo add debug handler support for core register reads */
1693 LOG_ERROR("not implemented");
1694 return ERROR_OK;
1697 static int xscale_write_core_reg(struct target *target, struct reg *r,
1698 int num, enum arm_mode mode, uint32_t value)
1700 /** \todo add debug handler support for core register writes */
1701 LOG_ERROR("not implemented");
1702 return ERROR_OK;
1705 static int xscale_full_context(struct target *target)
1707 struct arm *arm = target_to_arm(target);
1709 uint32_t *buffer;
1711 int i, j;
1713 LOG_DEBUG("-");
1715 if (target->state != TARGET_HALTED)
1717 LOG_WARNING("target not halted");
1718 return ERROR_TARGET_NOT_HALTED;
1721 buffer = malloc(4 * 8);
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * we can't enter User mode on an XScale (unpredictable),
1725 * but User shares registers with SYS
1727 for (i = 1; i < 7; i++)
1729 enum arm_mode mode = armv4_5_number_to_mode(i);
1730 bool valid = true;
1731 struct reg *r;
1733 if (mode == ARM_MODE_USR)
1734 continue;
1736 /* check if there are invalid registers in the current mode
1738 for (j = 0; valid && j <= 16; j++)
1740 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1741 mode, j).valid)
1742 valid = false;
1744 if (valid)
1745 continue;
1747 /* request banked registers */
1748 xscale_send_u32(target, 0x0);
1750 /* send CPSR for desired bank mode */
1751 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1753 /* get banked registers: r8 to r14; and SPSR
1754 * except in USR/SYS mode
1756 if (mode != ARM_MODE_SYS) {
1757 /* SPSR */
1758 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1759 mode, 16);
1761 xscale_receive(target, buffer, 8);
1763 buf_set_u32(r->value, 0, 32, buffer[7]);
1764 r->dirty = false;
1765 r->valid = true;
1766 } else {
1767 xscale_receive(target, buffer, 7);
1770 /* move data from buffer to register cache */
1771 for (j = 8; j <= 14; j++)
1773 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1774 mode, j);
1776 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1777 r->dirty = false;
1778 r->valid = true;
1782 free(buffer);
1784 return ERROR_OK;
1787 static int xscale_restore_banked(struct target *target)
1789 struct arm *arm = target_to_arm(target);
1791 int i, j;
1793 if (target->state != TARGET_HALTED)
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1799 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1800 * and check if any banked registers need to be written. Ignore
1801 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1802 * an XScale (unpredictable), but they share all registers.
1804 for (i = 1; i < 7; i++)
1806 enum arm_mode mode = armv4_5_number_to_mode(i);
1807 struct reg *r;
1809 if (mode == ARM_MODE_USR)
1810 continue;
1812 /* check if there are dirty registers in this mode */
1813 for (j = 8; j <= 14; j++)
1815 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1816 mode, j).dirty)
1817 goto dirty;
1820 /* if not USR/SYS, check if the SPSR needs to be written */
1821 if (mode != ARM_MODE_SYS)
1823 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1824 mode, 16).dirty)
1825 goto dirty;
1828 /* there's nothing to flush for this mode */
1829 continue;
1831 dirty:
1832 /* command 0x1: "send banked registers" */
1833 xscale_send_u32(target, 0x1);
1835 /* send CPSR for desired mode */
1836 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1838 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1839 * but this protocol doesn't understand that nuance.
1841 for (j = 8; j <= 14; j++) {
1842 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1843 mode, j);
1844 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1845 r->dirty = false;
1848 /* send spsr if not in USR/SYS mode */
1849 if (mode != ARM_MODE_SYS) {
1850 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1851 mode, 16);
1852 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1853 r->dirty = false;
1857 return ERROR_OK;
1860 static int xscale_read_memory(struct target *target, uint32_t address,
1861 uint32_t size, uint32_t count, uint8_t *buffer)
1863 struct xscale_common *xscale = target_to_xscale(target);
1864 uint32_t *buf32;
1865 uint32_t i;
1866 int retval;
1868 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1870 if (target->state != TARGET_HALTED)
1872 LOG_WARNING("target not halted");
1873 return ERROR_TARGET_NOT_HALTED;
1876 /* sanitize arguments */
1877 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1878 return ERROR_COMMAND_SYNTAX_ERROR;
1880 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1881 return ERROR_TARGET_UNALIGNED_ACCESS;
1883 /* send memory read request (command 0x1n, n: access size) */
1884 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1885 return retval;
1887 /* send base address for read request */
1888 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1889 return retval;
1891 /* send number of requested data words */
1892 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1893 return retval;
1895 /* receive data from target (count times 32-bit words in host endianness) */
1896 buf32 = malloc(4 * count);
1897 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1898 return retval;
1900 /* extract data from host-endian buffer into byte stream */
1901 for (i = 0; i < count; i++)
1903 switch (size)
1905 case 4:
1906 target_buffer_set_u32(target, buffer, buf32[i]);
1907 buffer += 4;
1908 break;
1909 case 2:
1910 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1911 buffer += 2;
1912 break;
1913 case 1:
1914 *buffer++ = buf32[i] & 0xff;
1915 break;
1916 default:
1917 LOG_ERROR("invalid read size");
1918 return ERROR_COMMAND_SYNTAX_ERROR;
1922 free(buf32);
1924 /* examine DCSR, to see if Sticky Abort (SA) got set */
1925 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1926 return retval;
1927 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1929 /* clear SA bit */
1930 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1931 return retval;
1933 return ERROR_TARGET_DATA_ABORT;
1936 return ERROR_OK;
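/* Memory reads go through the debug handler: command 0x1n selects the access
 * size n (1, 2 or 4 bytes), followed by the base address and the word count.
 * The handler streams the data back as 32-bit words; a data abort sets the
 * DCSR Sticky Abort bit, which command 0x60 clears again. */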
1939 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1940 uint32_t size, uint32_t count, uint8_t *buffer)
1942 struct xscale_common *xscale = target_to_xscale(target);
1944 /* with MMU inactive, there are only physical addresses */
1945 if (!xscale->armv4_5_mmu.mmu_enabled)
1946 return xscale_read_memory(target, address, size, count, buffer);
1948 /** \todo: provide a non-stub implementation of this routine. */
1949 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1950 target_name(target), __func__);
1951 return ERROR_FAIL;
1954 static int xscale_write_memory(struct target *target, uint32_t address,
1955 uint32_t size, uint32_t count, const uint8_t *buffer)
1957 struct xscale_common *xscale = target_to_xscale(target);
1958 int retval;
1960 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1962 if (target->state != TARGET_HALTED)
1964 LOG_WARNING("target not halted");
1965 return ERROR_TARGET_NOT_HALTED;
1968 /* sanitize arguments */
1969 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1970 return ERROR_COMMAND_SYNTAX_ERROR;
1972 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1973 return ERROR_TARGET_UNALIGNED_ACCESS;
1975 /* send memory write request (command 0x2n, n: access size) */
1976 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1977 return retval;
1979 /* send base address for write request */
1980 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1981 return retval;
1983 /* send number of requested data words to be written */
1984 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1985 return retval;
1987 /* extract data from host-endian buffer into byte stream */
1988 #if 0
1989 for (i = 0; i < count; i++)
1991 switch (size)
1993 case 4:
1994 value = target_buffer_get_u32(target, buffer);
1995 xscale_send_u32(target, value);
1996 buffer += 4;
1997 break;
1998 case 2:
1999 value = target_buffer_get_u16(target, buffer);
2000 xscale_send_u32(target, value);
2001 buffer += 2;
2002 break;
2003 case 1:
2004 value = *buffer;
2005 xscale_send_u32(target, value);
2006 buffer += 1;
2007 break;
2008 default:
2009 LOG_ERROR("should never get here");
2010 exit(-1);
2013 #endif
2014 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
2015 return retval;
2017 /* examine DCSR, to see if Sticky Abort (SA) got set */
2018 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
2019 return retval;
2020 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2022 /* clear SA bit */
2023 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2024 return retval;
2026 LOG_ERROR("data abort writing memory");
2027 return ERROR_TARGET_DATA_ABORT;
2030 return ERROR_OK;
2033 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2034 uint32_t size, uint32_t count, const uint8_t *buffer)
2036 struct xscale_common *xscale = target_to_xscale(target);
2038 /* with MMU inactive, there are only physical addresses */
2039 if (!xscale->armv4_5_mmu.mmu_enabled)
2040 return xscale_write_memory(target, address, size, count, buffer);
2042 /** \todo: provide a non-stub implementation of this routine. */
2043 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2044 target_name(target), __func__);
2045 return ERROR_FAIL;
2048 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2049 uint32_t count, const uint8_t *buffer)
2051 return xscale_write_memory(target, address, 4, count, buffer);
2054 static int xscale_get_ttb(struct target *target, uint32_t *result)
2056 struct xscale_common *xscale = target_to_xscale(target);
2057 uint32_t ttb;
2058 int retval;
2060 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2061 if (retval != ERROR_OK)
2062 return retval;
2063 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2065 *result = ttb;
2067 return ERROR_OK;
2070 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2071 int d_u_cache, int i_cache)
2073 struct xscale_common *xscale = target_to_xscale(target);
2074 uint32_t cp15_control;
2075 int retval;
2077 /* read cp15 control register */
2078 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2079 if (retval != ERROR_OK)
2080 return retval;
2081 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
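/* CP15 control register bits manipulated here: bit 0 (M, MMU enable),
 * bit 2 (C, data/unified cache enable), bit 12 (I, instruction cache
 * enable). */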
2083 if (mmu)
2084 cp15_control &= ~0x1U;
2086 if (d_u_cache)
2088 /* clean DCache */
2089 retval = xscale_send_u32(target, 0x50);
2090 if (retval != ERROR_OK)
2091 return retval;
2092 retval = xscale_send_u32(target, xscale->cache_clean_address);
2093 if (retval != ERROR_OK)
2094 return retval;
2096 /* invalidate DCache */
2097 retval = xscale_send_u32(target, 0x51);
2098 if (retval != ERROR_OK)
2099 return retval;
2101 cp15_control &= ~0x4U;
2104 if (i_cache)
2106 /* invalidate ICache */
2107 retval = xscale_send_u32(target, 0x52);
2108 if (retval != ERROR_OK)
2109 return retval;
2110 cp15_control &= ~0x1000U;
2113 /* write new cp15 control register */
2114 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2115 if (retval != ERROR_OK)
2116 return retval;
2118 /* execute cpwait to ensure outstanding operations complete */
2119 retval = xscale_send_u32(target, 0x53);
2120 return retval;
2123 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2124 int d_u_cache, int i_cache)
2126 struct xscale_common *xscale = target_to_xscale(target);
2127 uint32_t cp15_control;
2128 int retval;
2130 /* read cp15 control register */
2131 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2132 if (retval != ERROR_OK)
2133 return retval;
2134 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2136 if (mmu)
2137 cp15_control |= 0x1U;
2139 if (d_u_cache)
2140 cp15_control |= 0x4U;
2142 if (i_cache)
2143 cp15_control |= 0x1000U;
2145 /* write new cp15 control register */
2146 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2147 if (retval != ERROR_OK)
2148 return retval;
2150 /* execute cpwait to ensure outstanding operations complete */
2151 retval = xscale_send_u32(target, 0x53);
2152 return retval;
2155 static int xscale_set_breakpoint(struct target *target,
2156 struct breakpoint *breakpoint)
2158 int retval;
2159 struct xscale_common *xscale = target_to_xscale(target);
2161 if (target->state != TARGET_HALTED)
2163 LOG_WARNING("target not halted");
2164 return ERROR_TARGET_NOT_HALTED;
2167 if (breakpoint->set)
2169 LOG_WARNING("breakpoint already set");
2170 return ERROR_OK;
2173 if (breakpoint->type == BKPT_HARD)
2175 uint32_t value = breakpoint->address | 1;
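/* IBCRx holds the comparator address; bit 0 acts as the enable bit,
 * hence the "| 1" above. */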
2176 if (!xscale->ibcr0_used)
2178 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2179 xscale->ibcr0_used = 1;
2180 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2182 else if (!xscale->ibcr1_used)
2184 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2185 xscale->ibcr1_used = 1;
2186 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2188 else
2189 { /* bug: availability previously verified in xscale_add_breakpoint() */
2190 LOG_ERROR("BUG: no hardware comparator available");
2191 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2194 else if (breakpoint->type == BKPT_SOFT)
2196 if (breakpoint->length == 4)
2198 /* keep the original instruction in target endianness */
2199 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2201 return retval;
2203 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2204 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2206 return retval;
2209 else
2211 /* keep the original instruction in target endianness */
2212 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2214 return retval;
2216 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2217 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2219 return retval;
2222 breakpoint->set = 1;
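/* Clean and invalidate the caches below so the freshly written BKPT
 * opcode reaches memory and stale copies are not executed from the
 * ICache or fetch buffers. */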
2224 xscale_send_u32(target, 0x50); /* clean dcache */
2225 xscale_send_u32(target, xscale->cache_clean_address);
2226 xscale_send_u32(target, 0x51); /* invalidate dcache */
2227 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2230 return ERROR_OK;
2233 static int xscale_add_breakpoint(struct target *target,
2234 struct breakpoint *breakpoint)
2236 struct xscale_common *xscale = target_to_xscale(target);
2238 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2240 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2241 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2244 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2246 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2250 if (breakpoint->type == BKPT_HARD)
2252 xscale->ibcr_available--;
2255 return xscale_set_breakpoint(target, breakpoint);
2258 static int xscale_unset_breakpoint(struct target *target,
2259 struct breakpoint *breakpoint)
2261 int retval;
2262 struct xscale_common *xscale = target_to_xscale(target);
2264 if (target->state != TARGET_HALTED)
2266 LOG_WARNING("target not halted");
2267 return ERROR_TARGET_NOT_HALTED;
2270 if (!breakpoint->set)
2272 LOG_WARNING("breakpoint not set");
2273 return ERROR_OK;
2276 if (breakpoint->type == BKPT_HARD)
2278 if (breakpoint->set == 1)
2280 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2281 xscale->ibcr0_used = 0;
2283 else if (breakpoint->set == 2)
2285 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2286 xscale->ibcr1_used = 0;
2288 breakpoint->set = 0;
2290 else
2292 /* restore original instruction (kept in target endianness) */
2293 if (breakpoint->length == 4)
2295 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2297 return retval;
2300 else
2302 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2304 return retval;
2307 breakpoint->set = 0;
2309 xscale_send_u32(target, 0x50); /* clean dcache */
2310 xscale_send_u32(target, xscale->cache_clean_address);
2311 xscale_send_u32(target, 0x51); /* invalidate dcache */
2312 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2315 return ERROR_OK;
2318 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2320 struct xscale_common *xscale = target_to_xscale(target);
2322 if (target->state != TARGET_HALTED)
2324 LOG_ERROR("target not halted");
2325 return ERROR_TARGET_NOT_HALTED;
2328 if (breakpoint->set)
2330 xscale_unset_breakpoint(target, breakpoint);
2333 if (breakpoint->type == BKPT_HARD)
2334 xscale->ibcr_available++;
2336 return ERROR_OK;
2339 static int xscale_set_watchpoint(struct target *target,
2340 struct watchpoint *watchpoint)
2342 struct xscale_common *xscale = target_to_xscale(target);
2343 uint32_t enable = 0;
2344 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2345 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2347 if (target->state != TARGET_HALTED)
2349 LOG_ERROR("target not halted");
2350 return ERROR_TARGET_NOT_HALTED;
2353 switch (watchpoint->rw)
2355 case WPT_READ:
2356 enable = 0x3;
2357 break;
2358 case WPT_ACCESS:
2359 enable = 0x2;
2360 break;
2361 case WPT_WRITE:
2362 enable = 0x1;
2363 break;
2364 default:
2365 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2368 /* For a watchpoint spanning more than one word, both DBR registers must
2369 be used, with the second serving as a mask. */
2370 if (watchpoint->length > 4)
2372 if (xscale->dbr0_used || xscale->dbr1_used)
2374 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2375 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2378 /* Write mask value to DBR1, based on the length argument.
2379 * Address bits ignored by the comparator are those set in mask. */
2380 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2381 watchpoint->length - 1);
2382 xscale->dbr1_used = 1;
2383 enable |= 0x100; /* DBCON[M] */
2386 if (!xscale->dbr0_used)
2388 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2389 dbcon_value |= enable;
2390 xscale_set_reg_u32(dbcon, dbcon_value);
2391 watchpoint->set = 1;
2392 xscale->dbr0_used = 1;
2394 else if (!xscale->dbr1_used)
2396 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2397 dbcon_value |= enable << 2;
2398 xscale_set_reg_u32(dbcon, dbcon_value);
2399 watchpoint->set = 2;
2400 xscale->dbr1_used = 1;
2402 else
2404 LOG_ERROR("BUG: no hardware comparator available");
2405 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2408 return ERROR_OK;
2411 static int xscale_add_watchpoint(struct target *target,
2412 struct watchpoint *watchpoint)
2414 struct xscale_common *xscale = target_to_xscale(target);
2416 if (xscale->dbr_available < 1)
2418 LOG_ERROR("no more watchpoint registers available");
2419 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2422 if (watchpoint->value)
2423 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2425 /* check that length is a power of two */
2426 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2428 if (len % 2)
2430 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2431 return ERROR_COMMAND_ARGUMENT_INVALID;
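/* The power-of-two requirement (and the checks below) follow from DBR1
 * being programmed with length - 1 as a mask of ignored address bits;
 * see xscale_set_watchpoint(). */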
2435 if (watchpoint->length == 4) /* single word watchpoint */
2437 xscale->dbr_available--; /* one DBR reg used */
2438 return ERROR_OK;
2441 /* watchpoints across multiple words require both DBR registers */
2442 if (xscale->dbr_available < 2)
2444 LOG_ERROR("insufficient watchpoint registers available");
2445 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2448 if (watchpoint->length > watchpoint->address)
2450 LOG_ERROR("xscale does not support watchpoints with length "
2451 "greater than address");
2452 return ERROR_COMMAND_ARGUMENT_INVALID;
2455 xscale->dbr_available = 0;
2456 return ERROR_OK;
2459 static int xscale_unset_watchpoint(struct target *target,
2460 struct watchpoint *watchpoint)
2462 struct xscale_common *xscale = target_to_xscale(target);
2463 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2464 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2466 if (target->state != TARGET_HALTED)
2468 LOG_WARNING("target not halted");
2469 return ERROR_TARGET_NOT_HALTED;
2472 if (!watchpoint->set)
2474 LOG_WARNING("breakpoint not set");
2475 return ERROR_OK;
2478 if (watchpoint->set == 1)
2480 if (watchpoint->length > 4)
2482 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2483 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2485 else
2486 dbcon_value &= ~0x3;
2488 xscale_set_reg_u32(dbcon, dbcon_value);
2489 xscale->dbr0_used = 0;
2491 else if (watchpoint->set == 2)
2493 dbcon_value &= ~0xc;
2494 xscale_set_reg_u32(dbcon, dbcon_value);
2495 xscale->dbr1_used = 0;
2497 watchpoint->set = 0;
2499 return ERROR_OK;
2502 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2504 struct xscale_common *xscale = target_to_xscale(target);
2506 if (target->state != TARGET_HALTED)
2508 LOG_ERROR("target not halted");
2509 return ERROR_TARGET_NOT_HALTED;
2512 if (watchpoint->set)
2514 xscale_unset_watchpoint(target, watchpoint);
2517 if (watchpoint->length > 4)
2518 xscale->dbr_available++; /* both DBR regs now available */
2520 xscale->dbr_available++;
2522 return ERROR_OK;
2525 static int xscale_get_reg(struct reg *reg)
2527 struct xscale_reg *arch_info = reg->arch_info;
2528 struct target *target = arch_info->target;
2529 struct xscale_common *xscale = target_to_xscale(target);
2531 /* DCSR, TX and RX are accessible via JTAG */
2532 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2534 return xscale_read_dcsr(arch_info->target);
2536 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2538 /* 1 = consume register content */
2539 return xscale_read_tx(arch_info->target, 1);
2541 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2543 /* can't read from RX register (host -> debug handler) */
2544 return ERROR_OK;
2546 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2548 /* can't (explicitly) read from TXRXCTRL register */
2549 return ERROR_OK;
2551 else /* Other DBG registers have to be transferred by the debug handler */
2553 /* send CP read request (command 0x40) */
2554 xscale_send_u32(target, 0x40);
2556 /* send CP register number */
2557 xscale_send_u32(target, arch_info->dbg_handler_number);
2559 /* read register value */
2560 xscale_read_tx(target, 1);
2561 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2563 reg->dirty = 0;
2564 reg->valid = 1;
2567 return ERROR_OK;
2570 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2572 struct xscale_reg *arch_info = reg->arch_info;
2573 struct target *target = arch_info->target;
2574 struct xscale_common *xscale = target_to_xscale(target);
2575 uint32_t value = buf_get_u32(buf, 0, 32);
2577 /* DCSR, TX and RX are accessible via JTAG */
2578 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2580 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2581 return xscale_write_dcsr(arch_info->target, -1, -1);
2583 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2585 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2586 return xscale_write_rx(arch_info->target);
2588 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2590 /* can't write to TX register (debug-handler -> host) */
2591 return ERROR_OK;
2593 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2595 /* can't (explicitly) write to TXRXCTRL register */
2596 return ERROR_OK;
2598 else /* Other DBG registers have to be transferred by the debug handler */
2600 /* send CP write request (command 0x41) */
2601 xscale_send_u32(target, 0x41);
2603 /* send CP register number */
2604 xscale_send_u32(target, arch_info->dbg_handler_number);
2606 /* send CP register value */
2607 xscale_send_u32(target, value);
2608 buf_set_u32(reg->value, 0, 32, value);
2611 return ERROR_OK;
2614 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2616 struct xscale_common *xscale = target_to_xscale(target);
2617 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2618 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2620 /* send CP write request (command 0x41) */
2621 xscale_send_u32(target, 0x41);
2623 /* send CP register number */
2624 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2626 /* send CP register value */
2627 xscale_send_u32(target, value);
2628 buf_set_u32(dcsr->value, 0, 32, value);
2630 return ERROR_OK;
2633 static int xscale_read_trace(struct target *target)
2635 struct xscale_common *xscale = target_to_xscale(target);
2636 struct arm *arm = &xscale->arm;
2637 struct xscale_trace_data **trace_data_p;
2639 /* 258 words from debug handler
2640 * 256 trace buffer entries
2641 * 2 checkpoint addresses
2643 uint32_t trace_buffer[258];
2644 int is_address[256];
2645 int i, j;
2646 unsigned int num_checkpoints = 0;
2648 if (target->state != TARGET_HALTED)
2650 LOG_WARNING("target must be stopped to read trace data");
2651 return ERROR_TARGET_NOT_HALTED;
2654 /* send read trace buffer command (command 0x61) */
2655 xscale_send_u32(target, 0x61);
2657 /* receive trace buffer content */
2658 xscale_receive(target, trace_buffer, 258);
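/* Each of the 256 entries carries a message byte: the upper nybble is
 * the message type (exception, direct/indirect branch, checkpointed
 * branch, roll-over) and the lower nybble an instruction count.
 * Indirect-branch messages are preceded by four entries holding the
 * branch target, most significant byte first; checkpointed messages
 * (0xCn/0xDn) are counted separately below. */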
2660 /* parse buffer backwards to identify address entries */
2661 for (i = 255; i >= 0; i--)
2663 /* also count number of checkpointed entries */
2664 if ((trace_buffer[i] & 0xe0) == 0xc0)
2665 num_checkpoints++;
2667 is_address[i] = 0;
2668 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2669 ((trace_buffer[i] & 0xf0) == 0xd0))
2671 if (i > 0)
2672 is_address[--i] = 1;
2673 if (i > 0)
2674 is_address[--i] = 1;
2675 if (i > 0)
2676 is_address[--i] = 1;
2677 if (i > 0)
2678 is_address[--i] = 1;
2683 /* search for the first non-zero entry that is not part of an address */
2684 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2687 if (j == 256)
2689 LOG_DEBUG("no trace data collected");
2690 return ERROR_XSCALE_NO_TRACE_DATA;
2693 /* account for possible partial address at buffer start (wrap mode only) */
2694 if (is_address[0])
2695 { /* first entry is address; complete set of 4? */
2696 i = 1;
2697 while (i < 4)
2698 if (!is_address[i++])
2699 break;
2700 if (i < 4)
2701 j += i; /* partial address; can't use it */
2704 /* if first valid entry is indirect branch, can't use that either (no address) */
2705 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2706 j++;
2708 /* walk linked list to terminating entry */
2709 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2712 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2713 (*trace_data_p)->next = NULL;
2714 (*trace_data_p)->chkpt0 = trace_buffer[256];
2715 (*trace_data_p)->chkpt1 = trace_buffer[257];
2716 (*trace_data_p)->last_instruction =
2717 buf_get_u32(arm->pc->value, 0, 32);
2718 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2719 (*trace_data_p)->depth = 256 - j;
2720 (*trace_data_p)->num_checkpoints = num_checkpoints;
2722 for (i = j; i < 256; i++)
2724 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2725 if (is_address[i])
2726 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2727 else
2728 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2731 return ERROR_OK;
2734 static int xscale_read_instruction(struct target *target, uint32_t pc,
2735 struct arm_instruction *instruction)
2737 struct xscale_common *const xscale = target_to_xscale(target);
2738 int i;
2739 int section = -1;
2740 size_t size_read;
2741 uint32_t opcode;
2742 int retval;
2744 if (!xscale->trace.image)
2745 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2747 /* search for the section the current instruction belongs to */
2748 for (i = 0; i < xscale->trace.image->num_sections; i++)
2750 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2751 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2753 section = i;
2754 break;
2758 if (section == -1)
2760 /* current instruction couldn't be found in the image */
2761 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2764 if (xscale->trace.core_state == ARM_STATE_ARM)
2766 uint8_t buf[4];
2767 if ((retval = image_read_section(xscale->trace.image, section,
2768 pc - xscale->trace.image->sections[section].base_address,
2769 4, buf, &size_read)) != ERROR_OK)
2771 LOG_ERROR("error while reading instruction");
2772 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2774 opcode = target_buffer_get_u32(target, buf);
2775 arm_evaluate_opcode(opcode, pc, instruction);
2777 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2779 uint8_t buf[2];
2780 if ((retval = image_read_section(xscale->trace.image, section,
2781 pc - xscale->trace.image->sections[section].base_address,
2782 2, buf, &size_read)) != ERROR_OK)
2784 LOG_ERROR("error while reading instruction");
2785 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2787 opcode = target_buffer_get_u16(target, buf);
2788 thumb_evaluate_opcode(opcode, pc, instruction);
2790 else
2792 LOG_ERROR("BUG: unknown core state encountered");
2793 exit(-1);
2796 return ERROR_OK;
2799 /* Extract address encoded into trace data.
2800 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2801 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2802 int i, uint32_t *target)
2804 /* if there are fewer than four entries prior to the indirect branch message
2805 * we can't extract the address */
2806 if (i < 4)
2807 *target = 0;
2808 else
2809 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2810 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
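/* Example for xscale_branch_address(): entries i-4 .. i-1 holding
 * 0x12, 0x34, 0x56, 0x78 ahead of the branch message at index i yield
 * *target == 0x12345678. */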
2813 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2814 struct arm_instruction *instruction,
2815 struct command_context *cmd_ctx)
2817 int retval = xscale_read_instruction(target, pc, instruction);
2818 if (retval == ERROR_OK)
2819 command_print(cmd_ctx, "%s", instruction->text);
2820 else
2821 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2824 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2826 struct xscale_common *xscale = target_to_xscale(target);
2827 struct xscale_trace_data *trace_data = xscale->trace.data;
2828 int i, retval;
2829 uint32_t breakpoint_pc;
2830 struct arm_instruction instruction;
2831 uint32_t current_pc = 0; /* initialized when address determined */
2833 if (!xscale->trace.image)
2834 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2836 /* loop for each trace buffer that was loaded from target */
2837 while (trace_data)
2839 int chkpt = 0; /* incremented as checkpointed entries found */
2840 int j;
2842 /* FIXME: set this to correct mode when trace buffer is first enabled */
2843 xscale->trace.core_state = ARM_STATE_ARM;
2845 /* loop for each entry in this trace buffer */
2846 for (i = 0; i < trace_data->depth; i++)
2848 int exception = 0;
2849 uint32_t chkpt_reg = 0x0;
2850 uint32_t branch_target = 0;
2851 int count;
2853 /* trace entry type is upper nybble of 'message byte' */
2854 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2856 /* Target addresses of indirect branches are written into buffer
2857 * before the message byte representing the branch. Skip past it */
2858 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2859 continue;
2861 switch (trace_msg_type)
2863 case 0: /* Exceptions */
2864 case 1:
2865 case 2:
2866 case 3:
2867 case 4:
2868 case 5:
2869 case 6:
2870 case 7:
2871 exception = (trace_data->entries[i].data & 0x70) >> 4;
2873 /* FIXME: vector table may be at ffff0000 */
2874 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2875 break;
2877 case 8: /* Direct Branch */
2878 break;
2880 case 9: /* Indirect Branch */
2881 xscale_branch_address(trace_data, i, &branch_target);
2882 break;
2884 case 13: /* Checkpointed Indirect Branch */
2885 xscale_branch_address(trace_data, i, &branch_target);
2886 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2887 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2888 else
2889 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2891 chkpt++;
2892 break;
2894 case 12: /* Checkpointed Direct Branch */
2895 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2896 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2897 else
2898 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2900 /* if no current_pc, checkpoint will be starting point */
2901 if (current_pc == 0)
2902 branch_target = chkpt_reg;
2904 chkpt++;
2905 break;
2907 case 15: /* Roll-over */
2908 break;
2910 default: /* Reserved */
2911 LOG_WARNING("trace is suspect: invalid trace message byte");
2912 continue;
2916 /* If we don't have the current_pc yet, but we did get the branch target
2917 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2918 * then we can start displaying instructions at the next iteration, with
2919 * branch_target as the starting point.
2921 if (current_pc == 0)
2923 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2924 continue;
2927 /* We have current_pc. Read and display the instructions from the image.
2928 * First, display count instructions (lower nybble of message byte). */
2929 count = trace_data->entries[i].data & 0x0f;
2930 for (j = 0; j < count; j++)
2932 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2933 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2936 /* An additional instruction is implicitly added to count for
2937 * rollover and some exceptions: undef, swi, prefetch abort. */
2938 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2940 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2941 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2944 if (trace_msg_type == 15) /* rollover */
2945 continue;
2947 if (exception)
2949 command_print(cmd_ctx, "--- exception %i ---", exception);
2950 continue;
2953 /* not exception or rollover; next instruction is a branch and is
2954 * not included in the count */
2955 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2957 /* for direct branches, extract branch destination from instruction */
2958 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2960 retval = xscale_read_instruction(target, current_pc, &instruction);
2961 if (retval == ERROR_OK)
2962 current_pc = instruction.info.b_bl_bx_blx.target_address;
2963 else
2964 current_pc = 0; /* branch destination unknown */
2966 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2967 if (trace_msg_type == 12)
2969 if (current_pc == 0)
2970 current_pc = chkpt_reg;
2971 else if (current_pc != chkpt_reg) /* sanity check */
2972 LOG_WARNING("trace is suspect: checkpoint register "
2973 "inconsistent with adddress from image");
2976 if (current_pc == 0)
2977 command_print(cmd_ctx, "address unknown");
2979 continue;
2982 /* indirect branch; the branch destination was read from trace buffer */
2983 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2985 current_pc = branch_target;
2987 /* sanity check (checkpoint reg is redundant) */
2988 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2989 LOG_WARNING("trace is suspect: checkpoint register "
2990 "inconsistent with address from trace buffer");
2993 } /* END: for (i = 0; i < trace_data->depth; i++) */
2995 breakpoint_pc = trace_data->last_instruction; /* used below */
2996 trace_data = trace_data->next;
2998 } /* END: while (trace_data) */
3000 /* Finally... display all instructions up to the value of the pc when the
3001 * debug break occurred (saved when trace data was collected from target).
3002 * This is necessary because the trace only records execution branches and
3003 * roll-over messages (every 16 consecutive instructions), so the last few instructions are typically missed.
3005 if (current_pc == 0)
3006 return ERROR_OK; /* current_pc was never found */
3008 /* how many instructions remaining? */
3009 int gap_count = (breakpoint_pc - current_pc) /
3010 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
3012 /* should never be negative or over 16, but verify */
3013 if (gap_count < 0 || gap_count > 16)
3015 LOG_WARNING("trace is suspect: excessive gap at end of trace");
3016 return ERROR_OK; /* bail out; a large or negative value is no good */
3019 /* display remaining instructions */
3020 for (i = 0; i < gap_count; i++)
3022 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
3023 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
3026 return ERROR_OK;
3029 static const struct reg_arch_type xscale_reg_type = {
3030 .get = xscale_get_reg,
3031 .set = xscale_set_reg,
3034 static void xscale_build_reg_cache(struct target *target)
3036 struct xscale_common *xscale = target_to_xscale(target);
3037 struct arm *arm = &xscale->arm;
3038 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3039 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3040 int i;
3041 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3043 (*cache_p) = arm_build_reg_cache(target, arm);
3045 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3046 cache_p = &(*cache_p)->next;
3048 /* fill in values for the xscale reg cache */
3049 (*cache_p)->name = "XScale registers";
3050 (*cache_p)->next = NULL;
3051 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3052 (*cache_p)->num_regs = num_regs;
3054 for (i = 0; i < num_regs; i++)
3056 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3057 (*cache_p)->reg_list[i].value = calloc(4, 1);
3058 (*cache_p)->reg_list[i].dirty = 0;
3059 (*cache_p)->reg_list[i].valid = 0;
3060 (*cache_p)->reg_list[i].size = 32;
3061 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3062 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3063 arch_info[i] = xscale_reg_arch_info[i];
3064 arch_info[i].target = target;
3067 xscale->reg_cache = (*cache_p);
3070 static int xscale_init_target(struct command_context *cmd_ctx,
3071 struct target *target)
3073 xscale_build_reg_cache(target);
3074 return ERROR_OK;
3077 static int xscale_init_arch_info(struct target *target,
3078 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
3080 struct arm *arm;
3081 uint32_t high_reset_branch, low_reset_branch;
3082 int i;
3084 arm = &xscale->arm;
3086 /* store architecture-specific data */
3087 xscale->common_magic = XSCALE_COMMON_MAGIC;
3089 /* we don't really *need* a variant param ... */
3090 if (variant) {
3091 int ir_length = 0;
3093 if (strcmp(variant, "pxa250") == 0
3094 || strcmp(variant, "pxa255") == 0
3095 || strcmp(variant, "pxa26x") == 0)
3096 ir_length = 5;
3097 else if (strcmp(variant, "pxa27x") == 0
3098 || strcmp(variant, "ixp42x") == 0
3099 || strcmp(variant, "ixp45x") == 0
3100 || strcmp(variant, "ixp46x") == 0)
3101 ir_length = 7;
3102 else if (strcmp(variant, "pxa3xx") == 0)
3103 ir_length = 11;
3104 else
3105 LOG_WARNING("%s: unrecognized variant %s",
3106 tap->dotted_name, variant);
3108 if (ir_length && ir_length != tap->ir_length) {
3109 LOG_WARNING("%s: IR length for %s is %d; fixing",
3110 tap->dotted_name, variant, ir_length);
3111 tap->ir_length = ir_length;
3115 /* PXA3xx shifts the JTAG instructions */
3116 if (tap->ir_length == 11)
3117 xscale->xscale_variant = XSCALE_PXA3XX;
3118 else
3119 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3121 /* the debug handler isn't installed (and thus not running) at this time */
3122 xscale->handler_address = 0xfe000800;
3124 /* clear the vectors we keep locally for reference */
3125 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3126 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3128 /* no user-specified vectors have been configured yet */
3129 xscale->static_low_vectors_set = 0x0;
3130 xscale->static_high_vectors_set = 0x0;
3132 /* calculate branches to debug handler */
3133 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3134 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
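/* ARM B offsets are word offsets relative to the vector address + 8
 * (the pipelined PC), hence the "- 0x8" and ">> 2" above. The 0xfffffe
 * offset used for the remaining vectors below is -2 words, i.e. a
 * branch-to-self. */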
3136 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3137 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3139 for (i = 1; i <= 7; i++)
3141 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3142 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3145 /* 64kB aligned region used for DCache cleaning */
3146 xscale->cache_clean_address = 0xfffe0000;
3148 xscale->hold_rst = 0;
3149 xscale->external_debug_break = 0;
3151 xscale->ibcr_available = 2;
3152 xscale->ibcr0_used = 0;
3153 xscale->ibcr1_used = 0;
3155 xscale->dbr_available = 2;
3156 xscale->dbr0_used = 0;
3157 xscale->dbr1_used = 0;
3159 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
3160 target_name(target));
3162 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3163 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3165 xscale->vector_catch = 0x1;
3167 xscale->trace.data = NULL;
3168 xscale->trace.image = NULL;
3169 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3170 xscale->trace.buffer_fill = 0;
3171 xscale->trace.fill_counter = 0;
3173 /* prepare ARMv4/5 specific information */
3174 arm->arch_info = xscale;
3175 arm->read_core_reg = xscale_read_core_reg;
3176 arm->write_core_reg = xscale_write_core_reg;
3177 arm->full_context = xscale_full_context;
3179 arm_init_arch_info(target, arm);
3181 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3182 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3183 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3184 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3185 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3186 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3187 xscale->armv4_5_mmu.has_tiny_pages = 1;
3188 xscale->armv4_5_mmu.mmu_enabled = 0;
3190 return ERROR_OK;
3193 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3195 struct xscale_common *xscale;
3197 if (sizeof xscale_debug_handler - 1 > 0x800) {
3198 LOG_ERROR("debug_handler.bin: larger than 2kb");
3199 return ERROR_FAIL;
3202 xscale = calloc(1, sizeof(*xscale));
3203 if (!xscale)
3204 return ERROR_FAIL;
3206 return xscale_init_arch_info(target, xscale, target->tap,
3207 target->variant);
3210 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3212 struct target *target = NULL;
3213 struct xscale_common *xscale;
3214 int retval;
3215 uint32_t handler_address;
3217 if (CMD_ARGC < 2)
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3222 if ((target = get_target(CMD_ARGV[0])) == NULL)
3224 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3225 return ERROR_FAIL;
3228 xscale = target_to_xscale(target);
3229 retval = xscale_verify_pointer(CMD_CTX, xscale);
3230 if (retval != ERROR_OK)
3231 return retval;
3233 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3235 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3236 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3238 xscale->handler_address = handler_address;
3240 else
3242 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3243 return ERROR_FAIL;
3246 return ERROR_OK;
3249 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3251 struct target *target = NULL;
3252 struct xscale_common *xscale;
3253 int retval;
3254 uint32_t cache_clean_address;
3256 if (CMD_ARGC < 2)
3258 return ERROR_COMMAND_SYNTAX_ERROR;
3261 target = get_target(CMD_ARGV[0]);
3262 if (target == NULL)
3264 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3265 return ERROR_FAIL;
3267 xscale = target_to_xscale(target);
3268 retval = xscale_verify_pointer(CMD_CTX, xscale);
3269 if (retval != ERROR_OK)
3270 return retval;
3272 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3274 if (cache_clean_address & 0xffff)
3276 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3278 else
3280 xscale->cache_clean_address = cache_clean_address;
3283 return ERROR_OK;
3286 COMMAND_HANDLER(xscale_handle_cache_info_command)
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct xscale_common *xscale = target_to_xscale(target);
3290 int retval;
3292 retval = xscale_verify_pointer(CMD_CTX, xscale);
3293 if (retval != ERROR_OK)
3294 return retval;
3296 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3299 static int xscale_virt2phys(struct target *target,
3300 uint32_t virtual, uint32_t *physical)
3302 struct xscale_common *xscale = target_to_xscale(target);
3303 uint32_t cb;
3305 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3306 LOG_ERROR(xscale_not);
3307 return ERROR_TARGET_INVALID;
3310 uint32_t ret;
3311 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3312 virtual, &cb, &ret);
3313 if (retval != ERROR_OK)
3314 return retval;
3315 *physical = ret;
3316 return ERROR_OK;
3319 static int xscale_mmu(struct target *target, int *enabled)
3321 struct xscale_common *xscale = target_to_xscale(target);
3323 if (target->state != TARGET_HALTED)
3325 LOG_ERROR("Target not halted");
3326 return ERROR_TARGET_INVALID;
3328 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3329 return ERROR_OK;
3332 COMMAND_HANDLER(xscale_handle_mmu_command)
3334 struct target *target = get_current_target(CMD_CTX);
3335 struct xscale_common *xscale = target_to_xscale(target);
3336 int retval;
3338 retval = xscale_verify_pointer(CMD_CTX, xscale);
3339 if (retval != ERROR_OK)
3340 return retval;
3342 if (target->state != TARGET_HALTED)
3344 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3345 return ERROR_OK;
3348 if (CMD_ARGC >= 1)
3350 bool enable;
3351 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3352 if (enable)
3353 xscale_enable_mmu_caches(target, 1, 0, 0);
3354 else
3355 xscale_disable_mmu_caches(target, 1, 0, 0);
3356 xscale->armv4_5_mmu.mmu_enabled = enable;
3359 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3361 return ERROR_OK;
3364 COMMAND_HANDLER(xscale_handle_idcache_command)
3366 struct target *target = get_current_target(CMD_CTX);
3367 struct xscale_common *xscale = target_to_xscale(target);
3369 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3370 if (retval != ERROR_OK)
3371 return retval;
3373 if (target->state != TARGET_HALTED)
3375 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3376 return ERROR_OK;
3379 bool icache = false;
3380 if (strcmp(CMD_NAME, "icache") == 0)
3381 icache = true;
3382 if (CMD_ARGC >= 1)
3384 bool enable;
3385 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3386 if (icache) {
3387 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3388 if (enable)
3389 xscale_enable_mmu_caches(target, 0, 0, 1);
3390 else
3391 xscale_disable_mmu_caches(target, 0, 0, 1);
3392 } else {
3393 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3394 if (enable)
3395 xscale_enable_mmu_caches(target, 0, 1, 0);
3396 else
3397 xscale_disable_mmu_caches(target, 0, 1, 0);
3401 bool enabled = icache ?
3402 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3403 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3404 const char *msg = enabled ? "enabled" : "disabled";
3405 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3407 return ERROR_OK;
3410 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3412 struct target *target = get_current_target(CMD_CTX);
3413 struct xscale_common *xscale = target_to_xscale(target);
3414 int retval;
3416 retval = xscale_verify_pointer(CMD_CTX, xscale);
3417 if (retval != ERROR_OK)
3418 return retval;
3420 if (CMD_ARGC < 1)
3422 return ERROR_COMMAND_SYNTAX_ERROR;
3424 else
3426 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
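/* the 8-bit vector catch mask occupies DCSR bits [23:16] */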
3427 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3428 xscale_write_dcsr(target, -1, -1);
3431 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3433 return ERROR_OK;
3437 COMMAND_HANDLER(xscale_handle_vector_table_command)
3439 struct target *target = get_current_target(CMD_CTX);
3440 struct xscale_common *xscale = target_to_xscale(target);
3441 int err = 0;
3442 int retval;
3444 retval = xscale_verify_pointer(CMD_CTX, xscale);
3445 if (retval != ERROR_OK)
3446 return retval;
3448 if (CMD_ARGC == 0) /* print current settings */
3450 int idx;
3452 command_print(CMD_CTX, "active user-set static vectors:");
3453 for (idx = 1; idx < 8; idx++)
3454 if (xscale->static_low_vectors_set & (1 << idx))
3455 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3456 for (idx = 1; idx < 8; idx++)
3457 if (xscale->static_high_vectors_set & (1 << idx))
3458 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3459 return ERROR_OK;
3462 if (CMD_ARGC != 3)
3463 err = 1;
3464 else
3466 int idx;
3467 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3468 uint32_t vec;
3469 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3471 if (idx < 1 || idx >= 8)
3472 err = 1;
3474 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3476 xscale->static_low_vectors_set |= (1<<idx);
3477 xscale->static_low_vectors[idx] = vec;
3479 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3481 xscale->static_high_vectors_set |= (1<<idx);
3482 xscale->static_high_vectors[idx] = vec;
3484 else
3485 err = 1;
3488 if (err)
3489 return ERROR_COMMAND_SYNTAX_ERROR;
3491 return ERROR_OK;
3495 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3497 struct target *target = get_current_target(CMD_CTX);
3498 struct xscale_common *xscale = target_to_xscale(target);
3499 uint32_t dcsr_value;
3500 int retval;
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3506 if (target->state != TARGET_HALTED)
3508 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3509 return ERROR_OK;
3512 if (CMD_ARGC >= 1)
3514 if (strcmp("enable", CMD_ARGV[0]) == 0)
3515 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3516 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3517 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3518 else
3519 return ERROR_COMMAND_SYNTAX_ERROR;
3522 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
3524 if (strcmp("fill", CMD_ARGV[1]) == 0)
3526 int buffcount = 1; /* default */
3527 if (CMD_ARGC >= 3)
3528 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3529 if (buffcount < 1) /* invalid */
3531 command_print(CMD_CTX, "fill buffer count must be > 0");
3532 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3533 return ERROR_COMMAND_SYNTAX_ERROR;
3535 xscale->trace.buffer_fill = buffcount;
3536 xscale->trace.mode = XSCALE_TRACE_FILL;
3538 else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3539 xscale->trace.mode = XSCALE_TRACE_WRAP;
3540 else
3542 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3543 return ERROR_COMMAND_SYNTAX_ERROR;
3547 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
3549 char fill_string[12];
3550 sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
3551 command_print(CMD_CTX, "trace buffer enabled (%s)",
3552 (xscale->trace.mode == XSCALE_TRACE_FILL)
3553 ? fill_string : "wrap");
3555 else
3556 command_print(CMD_CTX, "trace buffer disabled");
3558 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
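/* Trace buffer control lives in DCSR bits [1:0]: fill mode sets bit 1
 * below, otherwise both bits are cleared. */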
3559 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3560 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3561 else
3562 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3564 return ERROR_OK;
3567 COMMAND_HANDLER(xscale_handle_trace_image_command)
3569 struct target *target = get_current_target(CMD_CTX);
3570 struct xscale_common *xscale = target_to_xscale(target);
3571 int retval;
3573 if (CMD_ARGC < 1)
3575 return ERROR_COMMAND_SYNTAX_ERROR;
3578 retval = xscale_verify_pointer(CMD_CTX, xscale);
3579 if (retval != ERROR_OK)
3580 return retval;
3582 if (xscale->trace.image)
3584 image_close(xscale->trace.image);
3585 free(xscale->trace.image);
3586 command_print(CMD_CTX, "previously loaded image found and closed");
3589 xscale->trace.image = malloc(sizeof(struct image));
3590 xscale->trace.image->base_address_set = 0;
3591 xscale->trace.image->start_address_set = 0;
3593 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3594 if (CMD_ARGC >= 2)
3596 xscale->trace.image->base_address_set = 1;
3597 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3599 else
3601 xscale->trace.image->base_address_set = 0;
3604 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3606 free(xscale->trace.image);
3607 xscale->trace.image = NULL;
3608 return ERROR_OK;
3611 return ERROR_OK;
3614 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3616 struct target *target = get_current_target(CMD_CTX);
3617 struct xscale_common *xscale = target_to_xscale(target);
3618 struct xscale_trace_data *trace_data;
3619 struct fileio file;
3620 int retval;
3622 retval = xscale_verify_pointer(CMD_CTX, xscale);
3623 if (retval != ERROR_OK)
3624 return retval;
3626 if (target->state != TARGET_HALTED)
3628 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3629 return ERROR_OK;
3632 if (CMD_ARGC < 1)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3637 trace_data = xscale->trace.data;
3639 if (!trace_data)
3641 command_print(CMD_CTX, "no trace data collected");
3642 return ERROR_OK;
3645 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3647 return ERROR_OK;
3650 while (trace_data)
3652 int i;
3654 fileio_write_u32(&file, trace_data->chkpt0);
3655 fileio_write_u32(&file, trace_data->chkpt1);
3656 fileio_write_u32(&file, trace_data->last_instruction);
3657 fileio_write_u32(&file, trace_data->depth);
3659 for (i = 0; i < trace_data->depth; i++)
3660 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3662 trace_data = trace_data->next;
3665 fileio_close(&file);
3667 return ERROR_OK;
3670 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3672 struct target *target = get_current_target(CMD_CTX);
3673 struct xscale_common *xscale = target_to_xscale(target);
3674 int retval;
3676 retval = xscale_verify_pointer(CMD_CTX, xscale);
3677 if (retval != ERROR_OK)
3678 return retval;
3680 xscale_analyze_trace(target, CMD_CTX);
3682 return ERROR_OK;
3685 COMMAND_HANDLER(xscale_handle_cp15)
3687 struct target *target = get_current_target(CMD_CTX);
3688 struct xscale_common *xscale = target_to_xscale(target);
3689 int retval;
3691 retval = xscale_verify_pointer(CMD_CTX, xscale);
3692 if (retval != ERROR_OK)
3693 return retval;
3695 if (target->state != TARGET_HALTED)
3697 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3698 return ERROR_OK;
3700 uint32_t reg_no = 0;
3701 struct reg *reg = NULL;
3702 if (CMD_ARGC > 0)
3704 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3705 /*translate from xscale cp15 register no to openocd register*/
3706 switch (reg_no)
3708 case 0:
3709 reg_no = XSCALE_MAINID;
3710 break;
3711 case 1:
3712 reg_no = XSCALE_CTRL;
3713 break;
3714 case 2:
3715 reg_no = XSCALE_TTB;
3716 break;
3717 case 3:
3718 reg_no = XSCALE_DAC;
3719 break;
3720 case 5:
3721 reg_no = XSCALE_FSR;
3722 break;
3723 case 6:
3724 reg_no = XSCALE_FAR;
3725 break;
3726 case 13:
3727 reg_no = XSCALE_PID;
3728 break;
3729 case 15:
3730 reg_no = XSCALE_CPACCESS;
3731 break;
3732 default:
3733 command_print(CMD_CTX, "invalid register number");
3734 return ERROR_COMMAND_SYNTAX_ERROR;
3736 reg = &xscale->reg_cache->reg_list[reg_no];
3739 if (CMD_ARGC == 1)
3741 uint32_t value;
3743 /* read cp15 control register */
3744 xscale_get_reg(reg);
3745 value = buf_get_u32(reg->value, 0, 32);
3746 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3748 else if (CMD_ARGC == 2)
3750 uint32_t value;
3751 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3753 /* send CP write request (command 0x41) */
3754 xscale_send_u32(target, 0x41);
3756 /* send CP register number */
3757 xscale_send_u32(target, reg_no);
3759 /* send CP register value */
3760 xscale_send_u32(target, value);
3762 /* execute cpwait to ensure outstanding operations complete */
3763 xscale_send_u32(target, 0x53);
3765 else
3767 return ERROR_COMMAND_SYNTAX_ERROR;
3770 return ERROR_OK;
3773 static const struct command_registration xscale_exec_command_handlers[] = {
3775 .name = "cache_info",
3776 .handler = xscale_handle_cache_info_command,
3777 .mode = COMMAND_EXEC,
3778 .help = "display information about CPU caches",
3781 .name = "mmu",
3782 .handler = xscale_handle_mmu_command,
3783 .mode = COMMAND_EXEC,
3784 .help = "enable or disable the MMU",
3785 .usage = "['enable'|'disable']",
3788 .name = "icache",
3789 .handler = xscale_handle_idcache_command,
3790 .mode = COMMAND_EXEC,
3791 .help = "display ICache state, optionally enabling or "
3792 "disabling it",
3793 .usage = "['enable'|'disable']",
3796 .name = "dcache",
3797 .handler = xscale_handle_idcache_command,
3798 .mode = COMMAND_EXEC,
3799 .help = "display DCache state, optionally enabling or "
3800 "disabling it",
3801 .usage = "['enable'|'disable']",
3804 .name = "vector_catch",
3805 .handler = xscale_handle_vector_catch_command,
3806 .mode = COMMAND_EXEC,
3807 .help = "set or display 8-bit mask of vectors "
3808 "that should trigger debug entry",
3809 .usage = "[mask]",
3812 .name = "vector_table",
3813 .handler = xscale_handle_vector_table_command,
3814 .mode = COMMAND_EXEC,
3815 .help = "set vector table entry in mini-ICache, "
3816 "or display current tables",
3817 .usage = "[('high'|'low') index code]",
3820 .name = "trace_buffer",
3821 .handler = xscale_handle_trace_buffer_command,
3822 .mode = COMMAND_EXEC,
3823 .help = "display trace buffer status, enable or disable "
3824 "tracing, and optionally reconfigure trace mode",
3825 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3828 .name = "dump_trace",
3829 .handler = xscale_handle_dump_trace_command,
3830 .mode = COMMAND_EXEC,
3831 .help = "dump content of trace buffer to file",
3832 .usage = "filename",
3835 .name = "analyze_trace",
3836 .handler = xscale_handle_analyze_trace_buffer_command,
3837 .mode = COMMAND_EXEC,
3838 .help = "analyze content of trace buffer",
3839 .usage = "",
3842 .name = "trace_image",
3843 .handler = xscale_handle_trace_image_command,
3844 .mode = COMMAND_EXEC,
3845 .help = "load image from file to address (default 0)",
3846 .usage = "filename [offset [filetype]]",
3849 .name = "cp15",
3850 .handler = xscale_handle_cp15,
3851 .mode = COMMAND_EXEC,
3852 .help = "Read or write coprocessor 15 register.",
3853 .usage = "register [value]",
3855 COMMAND_REGISTRATION_DONE
3857 static const struct command_registration xscale_any_command_handlers[] = {
3859 .name = "debug_handler",
3860 .handler = xscale_handle_debug_handler_command,
3861 .mode = COMMAND_ANY,
3862 .help = "Change address used for debug handler.",
3863 .usage = "<target> <address>",
3866 .name = "cache_clean_address",
3867 .handler = xscale_handle_cache_clean_address_command,
3868 .mode = COMMAND_ANY,
3869 .help = "Change address used for cleaning data cache.",
3870 .usage = "address",
3873 .chain = xscale_exec_command_handlers,
3875 COMMAND_REGISTRATION_DONE
3877 static const struct command_registration xscale_command_handlers[] = {
3879 .chain = arm_command_handlers,
3882 .name = "xscale",
3883 .mode = COMMAND_ANY,
3884 .help = "xscale command group",
3885 .usage = "",
3886 .chain = xscale_any_command_handlers,
3888 COMMAND_REGISTRATION_DONE
3891 struct target_type xscale_target =
3893 .name = "xscale",
3895 .poll = xscale_poll,
3896 .arch_state = xscale_arch_state,
3898 .target_request_data = NULL,
3900 .halt = xscale_halt,
3901 .resume = xscale_resume,
3902 .step = xscale_step,
3904 .assert_reset = xscale_assert_reset,
3905 .deassert_reset = xscale_deassert_reset,
3906 .soft_reset_halt = NULL,
3908 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3909 .get_gdb_reg_list = arm_get_gdb_reg_list,
3911 .read_memory = xscale_read_memory,
3912 .read_phys_memory = xscale_read_phys_memory,
3913 .write_memory = xscale_write_memory,
3914 .write_phys_memory = xscale_write_phys_memory,
3915 .bulk_write_memory = xscale_bulk_write_memory,
3917 .checksum_memory = arm_checksum_memory,
3918 .blank_check_memory = arm_blank_check_memory,
3920 .run_algorithm = armv4_5_run_algorithm,
3922 .add_breakpoint = xscale_add_breakpoint,
3923 .remove_breakpoint = xscale_remove_breakpoint,
3924 .add_watchpoint = xscale_add_watchpoint,
3925 .remove_watchpoint = xscale_remove_watchpoint,
3927 .commands = xscale_command_handlers,
3928 .target_create = xscale_target_create,
3929 .init_target = xscale_init_target,
3931 .virt2phys = xscale_virt2phys,
3932 .mmu = xscale_mmu