/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                              *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2009 Michael Schwingen                                  *
 *   michael@schwingen.org                                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#include "breakpoints.h"
#include "xscale.h"
#include "target_type.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include <helper/time_support.h>
#include "arm_opcodes.h"
/*
 * Important XScale documents available as of October 2009 include:
 *
 * Intel XScale® Core Developer’s Manual, January 2004
 *		Order Number: 273473-002
 *	This has a chapter detailing debug facilities, and punts some
 *	details to chip-specific microarchitecture documents.
 *
 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
 *		Document Number: 273539-005
 *	Less detailed than the developer's manual, but summarizes those
 *	missing details (for most XScales) and gives LOTS of notes about
 *	debugger/handler interaction issues.  Presents a simpler reset
 *	and load-handler sequence than the arch doc.  (Note, OpenOCD
 *	doesn't currently support "Hot-Debug" as defined there.)
 *
 * Chip-specific microarchitecture documents may also be useful.
 */
/* forward declarations */
static int xscale_resume(struct target *, int current,
	uint32_t address, int handle_breakpoints, int debug_execution);
static int xscale_debug_entry(struct target *);
static int xscale_restore_banked(struct target *);
static int xscale_get_reg(struct reg *reg);
static int xscale_set_reg(struct reg *reg, uint8_t *buf);
static int xscale_set_breakpoint(struct target *, struct breakpoint *);
static int xscale_set_watchpoint(struct target *, struct watchpoint *);
static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
static int xscale_read_trace(struct target *);
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * FIXME  the OpenOCD "bin2char" utility currently doesn't handle
 * binary files cleanly.  It's string oriented, and terminates them
 * with a NUL character.  Better would be to generate the constants
 * and let other code decide names, scoping, and other housekeeping.
 */
static	/* unsigned const char xscale_debug_handler[] = ... */
#include "xscale_debug.h"
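
/* Host <-> debug-handler protocol, as used in this file (this summary is
 * gathered from the command bytes sent via xscale_send_u32() below, not
 * from an official protocol reference):
 *   0x00 / 0x01   request / send banked registers for a given mode
 *   0x1n / 0x2n   read / write memory, n = access size in bytes
 *   0x30 / 0x31   resume (0x31 with trace collection enabled)
 *   0x50 - 0x53   dcache clean, dcache invalidate, icache invalidate, cpwait
 *   0x60          clear the DCSR Sticky Abort bit
 *   0x62          clean/prepare the trace buffer
 */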
static char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_RX",		/* 20 */
};
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
/* convenience wrapper to access XScale specific registers */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t buf[4];

	buf_set_u32(buf, 0, 32, value);

	return xscale_set_reg(reg, buf);
}
static const char xscale_not[] = "target is not an XScale";
static int xscale_verify_pointer(struct command_context *cmd_ctx,
	struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr,
	tap_state_t end_state)
{
	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(scratch, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}
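
/* Note: throughout this file the XScale debug JTAG instructions
 * (XSCALE_SELDCSR, XSCALE_DBGTX, XSCALE_DBGRX, XSCALE_LDIC) are shifted
 * left by xscale->xscale_variant before being loaded through
 * xscale_jtag_set_instr(), presumably to position the opcodes correctly
 * within the IR for the different XScale variants supported.
 */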
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() do not go out of scope! */
	return jtag_execute_queue();
}
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
{
	if (num_words == 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	struct xscale_common *xscale = target_to_xscale(target);
	int retval = ERROR_OK;
	tap_state_t path[3];
	struct scan_field fields[3];
	uint8_t *field0 = malloc(num_words * 1);
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint32_t *field1 = malloc(num_words * 4);
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	int words_done = 0;
	int words_scheduled = 0;
	int attempts = 0;
	int i;

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	uint8_t tmp;
	fields[0].in_value = &tmp;
	fields[0].check_value = &field0_check_value;
	fields[0].check_mask = &field0_check_mask;

	fields[1].num_bits = 32;

	fields[2].num_bits = 1;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;
	fields[2].check_value = &field2_check_value;
	fields[2].check_mask = &field2_check_mask;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);
	jtag_add_runtest(1, TAP_IDLE);	/* ensures that we're in the TAP_IDLE state as the above
					 * could be a no-op */

	/* repeat until all words have been collected */
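	/* Each scheduled DR scan returns a 3-bit status field plus a 32-bit data
	 * word; bit 0 of the status indicates whether the debug handler had
	 * actually written TX for that scan.  Words whose status bit is clear
	 * are dropped from the result set below and re-read on the next pass,
	 * so the loop keeps polling until num_words valid words are collected. */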
	while (words_done < num_words) {
		/* schedule reads */
		words_scheduled = 0;
		for (i = words_done; i < num_words; i++) {
			fields[0].in_value = &field0[i];

			jtag_add_pathmove(3, path);

			fields[1].in_value = (uint8_t *)(field1 + i);

			jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);

			jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));

			words_scheduled++;
		}

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while receiving data from debug handler");
			break;
		}

		/* examine results */
		for (i = words_done; i < num_words; i++) {
			if (!(field0[i] & 1)) {
				/* move backwards if necessary */
				int j;
				for (j = i; j < num_words - 1; j++) {
					field0[j] = field0[j + 1];
					field1[j] = field1[j + 1];
				}
				words_scheduled--;
			}
		}

		if (words_scheduled == 0) {
			if (attempts++ == 1000) {
				LOG_ERROR(
					"Failed to receive data from debug handler after 1000 attempts");
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}

		words_done += words_scheduled;
	}

	for (i = 0; i < num_words; i++)
		*(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);

	free(field1);
	free(field0);

	return retval;
}
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
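		/* (Detouring through Exit1-DR/Pause-DR/Exit2-DR before Shift-DR
		 * avoids clearing TX_READY, so the same TX value remains available
		 * to be read -- and consumed -- later.) */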
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!((!(field0_in & 1)) && consume))
			break;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			break;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
/* send count elements of size bytes to the debug handler */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	uint32_t t[3];
	int bits[3];

	int endianness = target->endianness;
	while (done_count++ < count) {
		t[0] = 0;
		bits[0] = 3;
		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t[1] = le_to_h_u32(buffer);
				else
					t[1] = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t[1] = le_to_h_u16(buffer);
				else
					t[1] = be_to_h_u16(buffer);
				break;
			case 1:
				t[1] = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
		bits[1] = 32;
		t[2] = 1;
		bits[2] = 1;
		jtag_add_dr_out(target->tap,
			3,
			bits,
			t,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
static int xscale_send_u32(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);

	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
	return xscale_write_rx(target);
}
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
/* parity of the number of bits: 0 if even, 1 if odd, for 32-bit words */
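/* Implementation note: 0x6996 is the parity lookup table for a 4-bit value
 * (bit n of 0x6996 is the parity of n), so once v has been XOR-folded down
 * to a nibble, "(0x6996 >> v) & 1" is the parity of the original word. */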
static unsigned int parity(unsigned int v)
{
	/* unsigned int ov = v; */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	/* LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1); */
	return (0x6996 >> v) & 1;
}
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
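	/* (So each load begins with a 33-bit packet: a 6-bit command field,
	 * 0x3 here, followed by 27 bits holding VA[31:5], i.e. the cache-line
	 * address; the 8 instruction words follow, each with a parity bit.) */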
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++) {
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		else {
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++) {
		if (xscale->static_low_vectors_set & (1 << i))
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		else {
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
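	/* (ARM B-instruction offsets are signed word offsets relative to the
	 * vector address plus 8 -- the PC reads two instructions ahead -- which
	 * is why 0x8 is subtracted and the result shifted right by 2 above.) */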
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;

	static const char *state[] = {
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] = {
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (arm->common_magic != ARM_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
		state[xscale->armv4_5_mmu.mmu_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
		arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
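	/* (DCSR bits [4:2] hold the "method of entry" code that the switch
	 * below decodes into a debug reason.) */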
	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			break;
	}

	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(
			xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
			0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
static int xscale_halt(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	} else if (target->state == TARGET_UNKNOWN) {
		/* this must not happen for an XScale target */
		LOG_ERROR("target was in unknown state when halt was requested");
		return ERROR_TARGET_INVALID;
	} else if (target->state == TARGET_RESET)
		LOG_DEBUG("target->state == TARGET_RESET");

	/* assert external dbg break */
	xscale->external_debug_break = 1;
	xscale_read_dcsr(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used) {
		struct breakpoint *ibcr0_bp =
			breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
			xscale_unset_breakpoint(target, ibcr0_bp);
		else
			LOG_ERROR(
				"BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
	}

	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
static int xscale_disable_single_step(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	retval = xscale_set_reg_u32(ibcr0, 0x0);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
static void xscale_enable_watchpoints(struct target *target)
{
	struct watchpoint *watchpoint = target->watchpoints;

	while (watchpoint) {
		if (watchpoint->set == 0)
			xscale_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;
	}
}
static void xscale_enable_breakpoints(struct target *target)
{
	struct breakpoint *breakpoint = target->breakpoints;

	/* set any pending breakpoints */
	while (breakpoint) {
		if (breakpoint->set == 0)
			xscale_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;
	}
}
static void xscale_free_trace_data(struct xscale_common *xscale)
{
	struct xscale_trace_data *td = xscale->trace.data;
	while (td) {
		struct xscale_trace_data *next_td = td->next;
		if (td->entries)
			free(td->entries);
		free(td);
		td = next_td;
	}
	xscale->trace.data = NULL;
}
static int xscale_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t current_pc;
	int retval;
	int i;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* update vector tables */
	retval = xscale_update_vectors(target);
	if (retval != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0) {
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		struct breakpoint *breakpoint;
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
		if (breakpoint != NULL) {
			uint32_t next_pc;
			enum trace_mode saved_trace_mode;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			retval = arm_simulate_step(target, &next_pc);
			if (retval != ERROR_OK) {
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR(
					"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
					current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);
			if (retval != ERROR_OK)
				return retval;

			/* send resume request */
			xscale_send_u32(target, 0x30);

			xscale_send_u32(target,
				buf_get_u32(arm->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(arm->cpsr->value, 0, 32));

			for (i = 7; i >= 0; i--) {
				xscale_send_u32(target,
					buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
					i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
			}

			xscale_send_u32(target,
				buf_get_u32(arm->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(arm->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		if (xscale->trace.mode == XSCALE_TRACE_FILL) {
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0) {
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
			}
		} else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);

		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	} else
		xscale_send_u32(target, 0x30);

	xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
			i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
static int xscale_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct arm *arm = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;
	int retval;
	uint32_t current_pc;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0) {
		retval = arm_simulate_step(target, NULL);
		if (retval != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
		LOG_DEBUG("current pc %" PRIx32, current_pc);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
	if (breakpoint != NULL) {
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	if (breakpoint)
		xscale_set_breakpoint(target, breakpoint);

	LOG_DEBUG("target stepped");

	return ERROR_OK;
}
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too.  Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT:  *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated.  Safer to force that, so writing new
	 * contents can't ever fail.
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache.  Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
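		/* (The loop below pushes the handler one 32-byte cache line at a
		 * time: each xscale_load_ic() call takes 8 words, and a short tail
		 * is padded with 0xe1a08008, "mov r8, r8", which is effectively a
		 * NOP.  The exception vector lines themselves are loaded separately
		 * from low_vectors/high_vectors further down.) */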
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
			binary_size > 0;
			binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_FAIL;
}

static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_FAIL;
}
static int xscale_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	uint32_t *buffer;
	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	buffer = malloc(4 * 8);

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * we can't enter User mode on an XScale (unpredictable),
	 * but User shares registers with SYS
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		bool valid = true;
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are invalid registers in the current mode
		 */
		for (j = 0; valid && j <= 16; j++) {
			if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).valid)
				valid = false;
		}
		if (valid)
			continue;

		/* request banked registers */
		xscale_send_u32(target, 0x0);

		/* send CPSR for desired bank mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* get banked registers:  r8 to r14; and SPSR
		 * except in USR/SYS mode
		 */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);

			xscale_receive(target, buffer, 8);

			buf_set_u32(r->value, 0, 32, buffer[7]);
			r->dirty = false;
			r->valid = true;
		} else
			xscale_receive(target, buffer, 7);

		/* move data from buffer to register cache */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);

			buf_set_u32(r->value, 0, 32, buffer[j - 8]);
			r->dirty = false;
			r->valid = true;
		}
	}

	free(buffer);

	return ERROR_OK;
}
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);
	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written.  Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1:  "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
static int xscale_read_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t *buf32;
	uint32_t i;
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
		address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory read request (command 0x1n, n: access size) */
	retval = xscale_send_u32(target, 0x10 | size);
	if (retval != ERROR_OK)
		return retval;

	/* send base address for read request */
	retval = xscale_send_u32(target, address);
	if (retval != ERROR_OK)
		return retval;

	/* send number of requested data words */
	retval = xscale_send_u32(target, count);
	if (retval != ERROR_OK)
		return retval;

	/* receive data from target (count times 32-bit words in host endianness) */
	buf32 = malloc(4 * count);
	retval = xscale_receive(target, buf32, count);
	if (retval != ERROR_OK)
		return retval;

	/* extract data from host-endian buffer into byte stream */
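	/* (The debug handler returns every element as a full 32-bit word, so
	 * the loop below re-packs each word into the caller's byte buffer using
	 * the target's endianness and the requested access size.) */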
	for (i = 0; i < count; i++) {
		switch (size) {
			case 4:
				target_buffer_set_u32(target, buffer, buf32[i]);
				buffer += 4;
				break;
			case 2:
				target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
				buffer += 2;
				break;
			case 1:
				*buffer++ = buf32[i] & 0xff;
				break;
			default:
				LOG_ERROR("invalid read size");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
	}

	free(buf32);

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
		/* clear SA bit */
		retval = xscale_send_u32(target, 0x60);
		if (retval != ERROR_OK)
			return retval;

		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
static int xscale_read_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
		target_name(target), __func__);
	return ERROR_FAIL;
}
static int xscale_write_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
		address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory write request (command 0x2n, n: access size) */
	retval = xscale_send_u32(target, 0x20 | size);
	if (retval != ERROR_OK)
		return retval;

	/* send base address for write request */
	retval = xscale_send_u32(target, address);
	if (retval != ERROR_OK)
		return retval;

	/* send number of requested data words to be written */
	retval = xscale_send_u32(target, count);
	if (retval != ERROR_OK)
		return retval;

	/* extract data from host-endian buffer into byte stream */
#if 0
	for (i = 0; i < count; i++) {
		switch (size) {
			case 4:
				value = target_buffer_get_u32(target, buffer);
				xscale_send_u32(target, value);
				buffer += 4;
				break;
			case 2:
				value = target_buffer_get_u16(target, buffer);
				xscale_send_u32(target, value);
				buffer += 2;
				break;
			case 1:
				value = *buffer;
				xscale_send_u32(target, value);
				buffer += 1;
				break;
			default:
				LOG_ERROR("should never get here");
		}
	}
#endif
	retval = xscale_send(target, buffer, count, size);
	if (retval != ERROR_OK)
		return retval;

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
		/* clear SA bit */
		retval = xscale_send_u32(target, 0x60);
		if (retval != ERROR_OK)
			return retval;

		LOG_ERROR("data abort writing memory");
		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
static int xscale_write_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_write_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
		target_name(target), __func__);
	return ERROR_FAIL;
}
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, const uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
static int xscale_get_ttb(struct target *target, uint32_t *result)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t ttb;
	int retval;

	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
	if (retval != ERROR_OK)
		return retval;
	ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);

	*result = ttb;

	return ERROR_OK;
}
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache) {
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache) {
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
static int xscale_enable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control |= 0x1U;

	if (d_u_cache)
		cp15_control |= 0x4U;

	if (i_cache)
		cp15_control |= 0x1000U;

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {	/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 * (arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 * (arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;
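
		/* Flush the caches so the instruction fetch sees the just-written
		 * BKPT rather than a stale cached copy of the original instruction. */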
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
static int xscale_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
		LOG_ERROR("no breakpoint unit available for hardware breakpoint");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
		LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available--;

	return xscale_set_breakpoint(target, breakpoint);
}
static int xscale_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if (breakpoint->set == 1) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
			xscale->ibcr0_used = 0;
		} else if (breakpoint->set == 2) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
			xscale->ibcr1_used = 0;
		}
		breakpoint->set = 0;
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 0;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
		xscale_unset_breakpoint(target, breakpoint);

	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available++;

	return ERROR_OK;
}
static int xscale_set_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	switch (watchpoint->rw) {
		/* 2-bit DBCON enable field per comparator; the encoding below is the
		 * usual XScale one (store-only / any-access / load-only) */
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
	}

	/* For watchpoint across more than one word, both DBR registers must
	 * be enlisted, with the second used as a mask. */
	if (watchpoint->length > 4) {
		if (xscale->dbr0_used || xscale->dbr1_used) {
			LOG_ERROR("BUG: sufficient hardware comparators unavailable");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		/* Write mask value to DBR1, based on the length argument.
		 * Address bits ignored by the comparator are those set in mask. */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
			watchpoint->length - 1);
		xscale->dbr1_used = 1;
		enable |= 0x100;	/* DBCON[M] */
	}

	if (!xscale->dbr0_used) {
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	} else if (!xscale->dbr1_used) {
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	} else {
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return ERROR_OK;
}
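/* Editor's note: a worked example of the mask logic above, assuming the
 * DBR1-as-mask behaviour described in the comments.  A 16-byte watchpoint at
 * 0x2000 programs DBR0 = 0x2000 and DBR1 = length - 1 = 0xf, so the comparator
 * ignores address bits [3:0] and the watchpoint covers 0x2000..0x200f.  This is
 * also why xscale_add_watchpoint() insists the length is a power of two (so
 * length - 1 is a contiguous low-order bit mask) and consumes both DBR
 * registers for any watchpoint longer than one word.
 */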
static int xscale_add_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (xscale->dbr_available < 1) {
		LOG_ERROR("no more watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->value)
		LOG_WARNING("xscale does not support value, mask arguments; ignoring");

	/* check that length is a power of two */
	for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
		if (len % 2) {
			LOG_ERROR("xscale requires that watchpoint length is a power of two");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	if (watchpoint->length == 4) {	/* single word watchpoint */
		xscale->dbr_available--;	/* one DBR reg used */
		return ERROR_OK;
	}

	/* watchpoints across multiple words require both DBR registers */
	if (xscale->dbr_available < 2) {
		LOG_ERROR("insufficient watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->length > watchpoint->address) {
		LOG_ERROR("xscale does not support watchpoints with length "
			"greater than address");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	xscale->dbr_available = 0;	/* multi-word watchpoint claims both DBRs */

	return ERROR_OK;
}
static int xscale_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	if (watchpoint->set == 1) {
		if (watchpoint->length > 4) {
			dbcon_value &= ~0x103;	/* clear DBCON[M] as well */
			xscale->dbr1_used = 0;	/* DBR1 was used for mask */
		} else
			dbcon_value &= ~0x3;

		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr0_used = 0;
	} else if (watchpoint->set == 2) {
		dbcon_value &= ~0xc;
		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr1_used = 0;
	}
	watchpoint->set = 0;

	return ERROR_OK;
}
static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->set)
		xscale_unset_watchpoint(target, watchpoint);

	if (watchpoint->length > 4)
		xscale->dbr_available++;	/* both DBR regs now available */

	xscale->dbr_available++;

	return ERROR_OK;
}
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
		return xscale_read_dcsr(arch_info->target);
	else if (strcmp(reg->name, "XSCALE_TX") == 0) {
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	} else if (strcmp(reg->name, "XSCALE_RX") == 0) {
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	} else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	} else {	/* Other DBG registers have to be transferred by the debug handler:
			 * send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
	}

	return ERROR_OK;
}
static int xscale_set_reg(struct reg *reg, uint8_t *buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	} else if (strcmp(reg->name, "XSCALE_RX") == 0) {
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	} else if (strcmp(reg->name, "XSCALE_TX") == 0) {
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	} else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	} else {	/* Other DBG registers have to be transferred by the debug handler:
			 * send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	struct xscale_reg *dcsr_arch_info = dcsr->arch_info;

	/* send CP write request (command 0x41) */
	xscale_send_u32(target, 0x41);

	/* send CP register number */
	xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

	/* send CP register value */
	xscale_send_u32(target, value);
	buf_set_u32(dcsr->value, 0, 32, value);

	return ERROR_OK;
}
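/* Editor's note: the three xscale_send_u32() calls above are the generic
 * "write a debug register" exchange with the resident debug handler -- a
 * command word (0x41), then the handler's register index, then the new value.
 * xscale_get_reg()/xscale_set_reg() use the same framing (0x40 for reads), so
 * only DCSR, TX and RX ever bypass the handler and go straight over JTAG.
 */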
static int xscale_read_trace(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	struct xscale_trace_data **trace_data_p;

	/* 258 words from debug handler
	 * 256 trace buffer entries
	 * 2 checkpoint addresses
	 */
	uint32_t trace_buffer[258];
	int is_address[256];
	int i, j;
	unsigned int num_checkpoints = 0;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target must be stopped to read trace data");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* send read trace buffer command (command 0x61) */
	xscale_send_u32(target, 0x61);

	/* receive trace buffer content */
	xscale_receive(target, trace_buffer, 258);

	/* parse buffer backwards to identify address entries */
	for (i = 255; i >= 0; i--) {
		/* also count number of checkpointed entries */
		if ((trace_buffer[i] & 0xe0) == 0xc0)
			num_checkpoints++;

		is_address[i] = 0;
		if (((trace_buffer[i] & 0xf0) == 0x90) ||
			((trace_buffer[i] & 0xf0) == 0xd0)) {
			/* the four entries before an indirect branch message hold its address */
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
		}
	}

	/* search first non-zero entry that is not part of an address */
	for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
		;

	if (j == 256) {
		LOG_DEBUG("no trace data collected");
		return ERROR_XSCALE_NO_TRACE_DATA;
	}

	/* account for possible partial address at buffer start (wrap mode only) */
	if (is_address[0]) {	/* first entry is address; complete set of 4? */
		i = 1;
		while (i < 4)
			if (!is_address[i++])
				break;
		if (i < 4)
			j += i;	/* partial address; can't use it */
	}

	/* if first valid entry is indirect branch, can't use that either (no address) */
	if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
		j++;

	/* walk linked list to terminating entry */
	for (trace_data_p = &xscale->trace.data; *trace_data_p;
		trace_data_p = &(*trace_data_p)->next)
		;

	*trace_data_p = malloc(sizeof(struct xscale_trace_data));
	(*trace_data_p)->next = NULL;
	(*trace_data_p)->chkpt0 = trace_buffer[256];
	(*trace_data_p)->chkpt1 = trace_buffer[257];
	(*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
	(*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
	(*trace_data_p)->depth = 256 - j;
	(*trace_data_p)->num_checkpoints = num_checkpoints;

	for (i = j; i < 256; i++) {
		(*trace_data_p)->entries[i - j].data = trace_buffer[i];
		if (is_address[i])
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
		else
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
	}

	return ERROR_OK;
}
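/* Editor's note: a sketch of the buffer layout this routine assumes, pieced
 * together from the code above.  The debug handler returns 258 words: words
 * 0..255 are raw trace bytes and words 256/257 are the two checkpoint
 * registers.  Each message byte carries the entry type in its upper nybble and
 * an instruction count in the lower nybble; indirect-branch messages (upper
 * nybble 0x9 or 0xd) are preceded by four bytes holding the branch target, and
 * entries matching (byte & 0xe0) == 0xc0 are the checkpointed variants counted
 * into num_checkpoints.  Usable entries are stored oldest-first starting at
 * index j.
 */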
static int xscale_read_instruction(struct target *target, uint32_t pc,
	struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++) {
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address +
			xscale->trace.image->sections[i].size > pc)) {
			section = i;
			break;
		}
	}

	if (section == -1) {
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM) {
		uint8_t buf[4];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				4, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	} else if (xscale->trace.core_state == ARM_STATE_THUMB) {
		uint8_t buf[2];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				2, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	} else {
		LOG_ERROR("BUG: unknown core state encountered");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
/* Extract address encoded into trace data.
 * Write result to address referenced by argument 'target', or 0 if incomplete. */
static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
	int i, uint32_t *target)
{
	/* if there are less than four entries prior to the indirect branch message
	 * we can't extract the address */
	if (i < 4)
		*target = 0;
	else {
		*target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
			(trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
	}
}
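/* Editor's note: a concrete reading of the reassembly above.  If the four
 * entries preceding the branch message (entries[i-4]..entries[i-1]) hold 0x12,
 * 0x34, 0x56, 0x78, the reconstructed branch target is 0x12345678: the entry
 * nearest the message byte supplies the low byte and the one four slots back
 * supplies the high byte.
 */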
static inline void xscale_display_instruction(struct target *target, uint32_t pc,
	struct arm_instruction *instruction,
	struct command_context *cmd_ctx)
{
	int retval = xscale_read_instruction(target, pc, instruction);
	if (retval == ERROR_OK)
		command_print(cmd_ctx, "%s", instruction->text);
	else
		command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
}
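/* Editor's note: summary of the trace message types handled by
 * xscale_analyze_trace() below, as encoded in the upper nybble of each
 * message byte (the lower nybble is an instruction count):
 *   0-7  exception (vector derived from the message byte)
 *   8    direct branch
 *   9    indirect branch (target taken from the preceding address bytes)
 *   12   checkpointed direct branch (target recoverable from a CHKPTx register)
 *   13   checkpointed indirect branch
 *   15   roll-over (16 instructions executed without a branch)
 * Anything else is treated as reserved and flagged as suspect trace data.
 */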
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc = 0;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;	/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data) {
		int chkpt = 0;	/* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++) {
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it. */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type) {
				case 0:	/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:	/* Direct Branch */
					break;

				case 9:	/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */
					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;
					chkpt++;
					break;

				case 15:	/* Roll-over */
					break;

				default:	/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					break;
			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0) {
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception) {
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12) {
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with address from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}
		}	/* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;
	}	/* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so the last few are typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;	/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16) {
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;	/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++) {
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, arm);

	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++) {
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *arm = &xscale->arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
			|| strcmp(variant, "pxa255") == 0
			|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
			|| strcmp(variant, "ixp42x") == 0
			|| strcmp(variant, "ixp45x") == 0
			|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
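/* Editor's note: a worked reading of the reset-vector branch computed above.
 * An ARM B instruction encodes a signed word offset relative to PC + 8, so for
 * the low vector at 0x0 the offset to the handler entry (handler_address +
 * 0x20) is (0xfe000800 + 0x20 - 0x0 - 0x8) >> 2; ARMV4_5_B() then packs the low
 * 24 bits of that offset into the opcode.  The high-vector variant does the
 * same arithmetic relative to 0xffff0000, and vectors 1..7 get offset 0xfffffe
 * (which works out to a branch-to-self placeholder) until real vectors are
 * configured.
 */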
static int xscale_target_create(struct target *target, Jim_Interp *interp)
{
	struct xscale_common *xscale;

	if (sizeof xscale_debug_handler - 1 > 0x800) {
		LOG_ERROR("debug_handler.bin: larger than 2kb");
		return ERROR_FAIL;
	}

	xscale = calloc(1, sizeof(*xscale));
	if (!xscale)
		return ERROR_FAIL;

	return xscale_init_arch_info(target, xscale, target->tap,
			target->variant);
}
)
3043 struct target
*target
= NULL
;
3044 struct xscale_common
*xscale
;
3046 uint32_t handler_address
;
3049 return ERROR_COMMAND_SYNTAX_ERROR
;
3051 target
= get_target(CMD_ARGV
[0]);
3052 if (target
== NULL
) {
3053 LOG_ERROR("target '%s' not defined", CMD_ARGV
[0]);
3057 xscale
= target_to_xscale(target
);
3058 retval
= xscale_verify_pointer(CMD_CTX
, xscale
);
3059 if (retval
!= ERROR_OK
)
3062 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], handler_address
);
3064 if (((handler_address
>= 0x800) && (handler_address
<= 0x1fef800)) ||
3065 ((handler_address
>= 0xfe000800) && (handler_address
<= 0xfffff800)))
3066 xscale
->handler_address
= handler_address
;
3069 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t cache_clean_address;

	if (CMD_ARGC < 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	target = get_target(CMD_ARGV[0]);
	if (target == NULL) {
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);

	if (cache_clean_address & 0xffff)
		LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
	else
		xscale->cache_clean_address = cache_clean_address;

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
}
static int xscale_virt2phys(struct target *target,
	uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cb;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	uint32_t ret;
	int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
			virtual, &cb, &ret);
	if (retval != ERROR_OK)
		return retval;
	*physical = ret;
	return ERROR_OK;
}
*target
, int *enabled
)
3141 struct xscale_common
*xscale
= target_to_xscale(target
);
3143 if (target
->state
!= TARGET_HALTED
) {
3144 LOG_ERROR("Target not halted");
3145 return ERROR_TARGET_INVALID
;
3147 *enabled
= xscale
->armv4_5_mmu
.mmu_enabled
;
COMMAND_HANDLER(xscale_handle_mmu_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1) {
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (enable)
			xscale_enable_mmu_caches(target, 1, 0, 0);
		else
			xscale_disable_mmu_caches(target, 1, 0, 0);
		xscale->armv4_5_mmu.mmu_enabled = enable;
	}

	command_print(CMD_CTX, "mmu %s",
		(xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_idcache_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);

	int retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	bool icache = false;
	if (strcmp(CMD_NAME, "icache") == 0)
		icache = true;

	if (CMD_ARGC >= 1) {
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (icache) {
			xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 0, 1);
			else
				xscale_disable_mmu_caches(target, 0, 0, 1);
		} else {
			xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 1, 0);
			else
				xscale_disable_mmu_caches(target, 0, 1, 0);
		}
	}

	bool enabled = icache ?
		xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
		xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
	const char *msg = enabled ? "enabled" : "disabled";
	command_print(CMD_CTX, "%s %s", CMD_NAME, msg);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
	/* the vector trap enables form an 8-bit DCSR field; offset 16 is assumed
	 * here to match the DCSR layout used elsewhere in this driver */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value,
		16, 8,
		xscale->vector_catch);
	xscale_write_dcsr(target, -1, -1);

	command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0) {	/* print current settings */
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX,
					"low  %d: 0x%" PRIx32,
					idx,
					xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX,
					"high %d: 0x%" PRIx32,
					idx,
					xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else {
		int idx;
		uint32_t vec;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
			xscale->static_low_vectors_set |= (1 << idx);
			xscale->static_low_vectors[idx] = vec;
		} else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
			xscale->static_high_vectors_set |= (1 << idx);
			xscale->static_high_vectors[idx] = vec;
		} else
			err = 1;
	}

	if (err)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1) {
		if (strcmp("enable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;	/* default */
		else if (strcmp("disable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
		else
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		if (strcmp("fill", CMD_ARGV[1]) == 0) {
			int buffcount = 1;	/* default */
			if (CMD_ARGC >= 3)
				COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
			if (buffcount < 1) {	/* invalid */
				command_print(CMD_CTX, "fill buffer count must be > 0");
				xscale->trace.mode = XSCALE_TRACE_DISABLED;
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
			xscale->trace.buffer_fill = buffcount;
			xscale->trace.mode = XSCALE_TRACE_FILL;
		} else if (strcmp("wrap", CMD_ARGV[1]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;
		else {
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
	}

	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		char fill_string[12];
		sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
		command_print(CMD_CTX, "trace buffer enabled (%s)",
			(xscale->trace.mode == XSCALE_TRACE_FILL)
			? fill_string : "wrap");
	} else
		command_print(CMD_CTX, "trace buffer disabled");

	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.mode == XSCALE_TRACE_FILL)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_trace_image_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (xscale->trace.image) {
		image_close(xscale->trace.image);
		free(xscale->trace.image);
		command_print(CMD_CTX, "previously loaded image found and closed");
	}

	xscale->trace.image = malloc(sizeof(struct image));
	xscale->trace.image->base_address_set = 0;
	xscale->trace.image->start_address_set = 0;

	/* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		xscale->trace.image->base_address_set = 1;
		COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
	} else
		xscale->trace.image->base_address_set = 0;

	if (image_open(xscale->trace.image, CMD_ARGV[0],
		(CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
		free(xscale->trace.image);
		xscale->trace.image = NULL;
		return ERROR_OK;
	}

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	trace_data = xscale->trace.data;

	if (!trace_data) {
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
		return ERROR_OK;

	while (trace_data) {
		int i;

		fileio_write_u32(&file, trace_data->chkpt0);
		fileio_write_u32(&file, trace_data->chkpt1);
		fileio_write_u32(&file, trace_data->last_instruction);
		fileio_write_u32(&file, trace_data->depth);

		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(&file, trace_data->entries[i].data |
				((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(&file);

	return ERROR_OK;
}
)
3466 struct target
*target
= get_current_target(CMD_CTX
);
3467 struct xscale_common
*xscale
= target_to_xscale(target
);
3470 retval
= xscale_verify_pointer(CMD_CTX
, xscale
);
3471 if (retval
!= ERROR_OK
)
3474 xscale_analyze_trace(target
, CMD_CTX
);
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from xscale cp15 register no to openocd register */
		switch (reg_no) {
			case 0:
				reg_no = XSCALE_MAINID;
				break;
			case 1:
				reg_no = XSCALE_CTRL;
				break;
			case 2:
				reg_no = XSCALE_TTB;
				break;
			case 3:
				reg_no = XSCALE_DAC;
				break;
			case 5:
				reg_no = XSCALE_FSR;
				break;
			case 6:
				reg_no = XSCALE_FAR;
				break;
			case 13:
				reg_no = XSCALE_PID;
				break;
			case 15:
				reg_no = XSCALE_CPACCESS;
				break;
			default:
				command_print(CMD_CTX, "invalid register number");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];
	}

	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read the selected cp15 register via the debug handler */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
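/* Editor's note: example usage for the "cp15" command registered below, using
 * the CP15 numbering handled in the switch above (0 = MAINID, 1 = CTRL,
 * 2 = TTB, 3 = DAC, 5 = FSR, 6 = FAR, 13 = PID, 15 = CPACCESS):
 *
 *   xscale cp15 1               ;# read the control register
 *   xscale cp15 2 0xa0004000    ;# write the translation table base
 *
 * Writes go through the debug handler (command 0x41 followed by 0x53 for
 * CPWAIT), so the target must be halted.
 */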
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "<target> <address>",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu,
};