/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                             *
 *   Dominic.Rath@gmx.de                                                  *
 *                                                                        *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                *
 *   oyvind.harboe@zylin.com                                              *
 *                                                                        *
 *   Copyright (C) 2009 Michael Schwingen                                 *
 *   michael@schwingen.org                                                *
 *                                                                        *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                        *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                        *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.             *
 ***************************************************************************/
#include "breakpoints.h"
#include "xscale.h"
#include "target_type.h"
#include "arm_jtag.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include <helper/time_support.h>
#include "register.h"
#include "image.h"
#include "arm_opcodes.h"
/*
 * Important XScale documents available as of October 2009 include:
 *
 *  Intel XScale® Core Developer's Manual, January 2004
 *		Order Number: 273473-002
 *	This has a chapter detailing debug facilities, and punts some
 *	details to chip-specific microarchitecture documents.
 *
 *  Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
 *		Document Number: 273539-005
 *	Less detailed than the developer's manual, but summarizes those
 *	missing details (for most XScales) and gives LOTS of notes about
 *	debugger/handler interaction issues.  Presents a simpler reset
 *	and load-handler sequence than the arch doc.  (Note, OpenOCD
 *	doesn't currently support "Hot-Debug" as defined there.)
 *
 * Chip-specific microarchitecture documents may also be useful.
 */

/* forward declarations */
static int xscale_resume(struct target *, int current,
	uint32_t address, int handle_breakpoints, int debug_execution);
static int xscale_debug_entry(struct target *);
static int xscale_restore_banked(struct target *);
static int xscale_get_reg(struct reg *reg);
static int xscale_set_reg(struct reg *reg, uint8_t *buf);
static int xscale_set_breakpoint(struct target *, struct breakpoint *);
static int xscale_set_watchpoint(struct target *, struct watchpoint *);
static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
static int xscale_read_trace(struct target *);

/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * FIXME  the OpenOCD "bin2char" utility currently doesn't handle
 * binary files cleanly.  It's string oriented, and terminates them
 * with a NUL character.  Better would be to generate the constants
 * and let other code decide names, scoping, and other housekeeping.
 */
static /* unsigned const char xscale_debug_handler[] = ... */
#include "xscale_debug.h"
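/* xscale_debug.h holds the raw image of that debug handler;
 * xscale_deassert_reset() below copies it into the mini-ICache one
 * 32-byte cache line at a time via xscale_load_ic(). */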
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_RX",		/* 20 */

static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
/* convenience wrapper to access XScale specific registers */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
	buf_set_u32(buf, 0, 32, value);
	return xscale_set_reg(reg, buf);

static const char xscale_not[] = "target is not an XScale";

static int xscale_verify_pointer(struct command_context *cmd_ctx,
	struct xscale_common *xscale)
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;

static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
	assert(tap != NULL);
	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
		struct scan_field field;
		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(scratch, 0, field.num_bits, new_instr);
		jtag_add_ir_scan(tap, &field, end_state);
static int xscale_read_dcsr(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	fields[0].in_value = &tmp;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].in_value = &tmp2;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
		LOG_ERROR("JTAG error while reading DCSR");
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();

static void xscale_getbuf(jtag_callback_data_t arg)
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct xscale_common *xscale = target_to_xscale(target);
	int retval = ERROR_OK;
	struct scan_field fields[3];
	uint8_t *field0 = malloc(num_words * 1);
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint32_t *field1 = malloc(num_words * 4);
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	int words_scheduled = 0;
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 3;
	fields[0].in_value = &tmp;
	fields[0].check_value = &field0_check_value;
	fields[0].check_mask = &field0_check_mask;
	fields[1].num_bits = 32;
	fields[2].num_bits = 1;
	fields[2].in_value = &tmp2;
	fields[2].check_value = &field2_check_value;
	fields[2].check_mask = &field2_check_mask;
	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
	jtag_add_runtest(1, TAP_IDLE);	/* ensures that we're in the TAP_IDLE state as the above could be a no-op */
	/* repeat until all words have been collected */
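	/* Each queued DR scan returns a 3-bit status field plus one 32-bit word;
	 * bit 0 of the status tells whether the debug handler had valid TX data
	 * for that scan.  Words that come back with the bit clear are compacted
	 * out below and retried on the next pass of the loop. */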
	while (words_done < num_words)
		for (i = words_done; i < num_words; i++)
			fields[0].in_value = &field0[i];
			jtag_add_pathmove(3, path);
			fields[1].in_value = (uint8_t *)(field1 + i);
			jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
			jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
		if ((retval = jtag_execute_queue()) != ERROR_OK)
			LOG_ERROR("JTAG error while receiving data from debug handler");
		/* examine results */
		for (i = words_done; i < num_words; i++)
			if (!(field0[i] & 1))
				/* move backwards if necessary */
				for (j = i; j < num_words - 1; j++)
					field0[j] = field0[j + 1];
					field1[j] = field1[j + 1];
		if (words_scheduled == 0)
			if (attempts++ == 1000)
				LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
				retval = ERROR_TARGET_TIMEOUT;
		words_done += words_scheduled;
	for (i = 0; i < num_words; i++)
		*(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);

static int xscale_read_tx(struct target *target, int consume)
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t noconsume_path[6];
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
	fields[2].num_bits = 1;
	fields[2].in_value = &tmp;
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
			jtag_add_pathmove(3, path);
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
		if ((retval = jtag_execute_queue()) != ERROR_OK)
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		if (!((!(field0_in & 1)) && consume))
		if (debug_level >= 3)
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

static int xscale_write_rx(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].in_value = &tmp;
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);
	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
		if ((retval = jtag_execute_queue()) != ERROR_OK)
			LOG_ERROR("JTAG error while writing RX");
		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		if (!(field0_in & 1))
		if (debug_level >= 3)
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
		LOG_ERROR("JTAG error while writing RX");
/* send count elements of size byte to the debug handler */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
	struct xscale_common *xscale = target_to_xscale(target);
	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
	int endianness = target->endianness;
	while (done_count++ < count)
			if (endianness == TARGET_LITTLE_ENDIAN)
				t[1] = le_to_h_u32(buffer);
				t[1] = be_to_h_u32(buffer);
			if (endianness == TARGET_LITTLE_ENDIAN)
				t[1] = le_to_h_u16(buffer);
				t[1] = be_to_h_u16(buffer);
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_COMMAND_SYNTAX_ERROR;
		jtag_add_dr_out(target->tap,
	if ((retval = jtag_execute_queue()) != ERROR_OK)
		LOG_ERROR("JTAG error while sending data to debug handler");

static int xscale_send_u32(struct target *target, uint32_t value)
	struct xscale_common *xscale = target_to_xscale(target);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
	return xscale_write_rx(target);

static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
	struct xscale_common *xscale = target_to_xscale(target);
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
		xscale->hold_rst = hold_rst;
	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	fields[0].in_value = &tmp;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].in_value = &tmp2;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
		LOG_ERROR("JTAG error while writing DCSR");
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
/* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
static unsigned int parity(unsigned int v)
	// unsigned int ov = v;
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	// LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
	return (0x6996 >> v) & 1;
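/* 0x6996 is a 16-entry lookup table: bit n holds the parity of the 4-bit
 * value n.  After folding the word down to one nibble, a single shift and
 * mask yields the result; e.g. v = 0x6 has two bits set, and bit 6 of
 * 0x6996 is 0. */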
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
	struct xscale_common *xscale = target_to_xscale(target);
	struct scan_field fields[2];
	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);
	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;
	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;
	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;
	for (word = 0; word < 8; word++)
		buf_set_u32(packet, 0, 32, buffer[word]);
		memcpy(&value, packet, sizeof(uint32_t));
		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	return jtag_execute_queue();

static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
	struct xscale_common *xscale = target_to_xscale(target);
	struct scan_field fields[2];
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);
	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);
	memset(&fields, 0, sizeof fields);
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;
	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
static int xscale_update_vectors(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t low_reset_branch, high_reset_branch;
	for (i = 1; i < 8; i++)
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
			if (retval != ERROR_OK)
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	for (i = 1; i < 8; i++)
		if (xscale->static_low_vectors_set & (1 << i))
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
			if (retval != ERROR_OK)
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	/* calculate branches to debug handler */
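	/* ARM "B" encodes a signed 24-bit word offset relative to the vector
	 * address + 8, hence (destination - vector - 8) >> 2 below; both reset
	 * vectors are redirected to handler_address + 0x20. */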
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);
	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

static int xscale_arch_state(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	static const char *state[] =
		"disabled", "enabled"
	static const char *arch_dbg_reason[] =
		"", "\n(processor reset)", "\n(trace buffer full)"
	if (armv4_5->common_magic != ARM_COMMON_MAGIC)
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
		state[xscale->armv4_5_mmu.mmu_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
		arch_dbg_reason[xscale->arch_debug_reason]);
static int xscale_poll(struct target *target)
	int retval = ERROR_OK;
	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;
			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
static int xscale_debug_entry(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
	if (!is_arm_mode(armv4_5->core_mode))
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(armv4_5->core_mode));
	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
		struct reg *r = arm_reg_current(armv4_5, i);
		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;
	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
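	/* DCSR bits [4:2] are the "method of entry" (MOE) field; the switch
	 * below maps each encoding to an OpenOCD debug reason. */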
	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
			LOG_ERROR("Method of Entry is 'Reserved'");
	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);
	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
		uint32_t cache_type_reg;
		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
		xscale_read_trace(target);
		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
static int xscale_halt(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	LOG_DEBUG("target->state: %s",
		target_state_name(target));
	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target was already halted");
	else if (target->state == TARGET_UNKNOWN)
		/* this must not happen for a xscale target */
		LOG_ERROR("target was in unknown state when halt was requested");
		return ERROR_TARGET_INVALID;
	else if (target->state == TARGET_RESET)
		LOG_DEBUG("target->state == TARGET_RESET");
		/* assert external dbg break */
		xscale->external_debug_break = 1;
		xscale_read_dcsr(target);
		target->debug_reason = DBG_REASON_DBGRQ;

static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	if (xscale->ibcr0_used)
		struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
			xscale_unset_breakpoint(target, ibcr0_bp);
			LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
	if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)

static int xscale_disable_single_step(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)

static void xscale_enable_watchpoints(struct target *target)
	struct watchpoint *watchpoint = target->watchpoints;
		if (watchpoint->set == 0)
			xscale_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;

static void xscale_enable_breakpoints(struct target *target)
	struct breakpoint *breakpoint = target->breakpoints;
	/* set any pending breakpoints */
		if (breakpoint->set == 0)
			xscale_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;

static void xscale_free_trace_data(struct xscale_common *xscale)
	struct xscale_trace_data *td = xscale->trace.data;
		struct xscale_trace_data *next_td = td->next;
	xscale->trace.data = NULL;
static int xscale_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t current_pc;
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	if (!debug_execution)
		target_free_all_working_areas(target);
	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
	/* current = 1: continue on current pc, otherwise continue at <address> */
		buf_set_u32(armv4_5->pc->value, 0, 32, address);
	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		struct breakpoint *breakpoint;
		breakpoint = breakpoint_find(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
			enum trace_mode saved_trace_mode;
			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);
			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);
			/* restore banked registers */
			retval = xscale_restore_banked(target);
			if (retval != ERROR_OK)
			/* send resume request */
			xscale_send_u32(target, 0x30);
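			/* the handler then expects CPSR, r7 down to r0, and finally
			 * the PC, exactly the order written below */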
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			for (i = 7; i >= 0; i--)
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
			/* wait for and process debug entry */
			xscale_debug_entry(target);
			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;
			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);
	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
		if (xscale->trace.mode == XSCALE_TRACE_FILL)
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0)
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
		else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
		xscale_send_u32(target, 0x30);
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));
	for (i = 7; i >= 0; i--)
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->pc->value, 0, 32));
	target->debug_reason = DBG_REASON_NOTHALTED;
	if (!debug_execution)
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	LOG_DEBUG("target resumed");
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	target->debug_reason = DBG_REASON_SINGLESTEP;
	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
	retval = xscale_send_u32(target,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));
	for (i = 7; i >= 0; i--)
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	retval = xscale_send_u32(target,
		buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->pc->value, 0, 32));
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);
	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

static int xscale_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
	struct arm *armv4_5 = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t current_pc;
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	/* current = 1: continue on current pc, otherwise continue at <address> */
		buf_set_u32(armv4_5->pc->value, 0, 32, address);
	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0)
		if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
		LOG_DEBUG("current pc %" PRIx32, current_pc);
		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (breakpoint != NULL) {
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		xscale_set_breakpoint(target, breakpoint);
	LOG_DEBUG("target stepped");
static int xscale_assert_reset(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	LOG_DEBUG("target->state: %s",
		target_state_name(target));
	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);
	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();
	jtag_add_reset(0, 1);
	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();
	target->state = TARGET_RESET;
	if (target->reset_halt)
		if ((retval = target_halt(target)) != ERROR_OK)
static int xscale_deassert_reset(struct target *target)
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;
	/* mark all hardware breakpoints as unset */
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);
	register_cache_invalidate(xscale->armv4_5_common.core_cache);
	/* FIXME mark hardware watchpoints got unset too.  Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT:  *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated.  Safer to force that, so writing new
	 * contents can't ever fail..
	 */
		const uint8_t *buffer = xscale_debug_handler;
		jtag_add_reset(0, 0);
		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);
		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();
		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);
		/* Load the debug handler into the mini-icache.  Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size -= buf_cnt, buffer += buf_cnt)
			uint32_t cache_line[8];
			buf_cnt = binary_size;
			for (i = 0; i < buf_cnt; i += 4)
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;
			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
				retval = xscale_load_ic(target, address,
				if (retval != ERROR_OK)
		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
		jtag_add_runtest(30, TAP_IDLE);
		jtag_add_sleep(100000);
		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);
		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;
		if (!target->reset_halt)
			jtag_add_sleep(10000);
			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;
			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");

static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint32_t value)
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");

static int xscale_full_context(struct target *target)
	struct arm *armv4_5 = target_to_arm(target);
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	buffer = malloc(4 * 8);
	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * we can't enter User mode on an XScale (unpredictable),
	 * but User shares registers with SYS
	 */
	for (i = 1; i < 7; i++)
		enum arm_mode mode = armv4_5_number_to_mode(i);
		if (mode == ARM_MODE_USR)
		/* check if there are invalid registers in the current mode
		 */
		for (j = 0; valid && j <= 16; j++)
			if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
		/* request banked registers */
		xscale_send_u32(target, 0x0);
		/* send CPSR for desired bank mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
		/* get banked registers:  r8 to r14; and SPSR
		 * except in USR/SYS mode
		 */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			xscale_receive(target, buffer, 8);
			buf_set_u32(r->value, 0, 32, buffer[7]);
			xscale_receive(target, buffer, 7);
		/* move data from buffer to register cache */
		for (j = 8; j <= 14; j++)
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			buf_set_u32(r->value, 0, 32, buffer[j - 8]);
static int xscale_restore_banked(struct target *target)
	struct arm *armv4_5 = target_to_arm(target);
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written.  Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
		enum arm_mode mode = armv4_5_number_to_mode(i);
		if (mode == ARM_MODE_USR)
		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
		/* there's nothing to flush for this mode */
		/* command 0x1:  "send banked registers" */
		xscale_send_u32(target, 0x1);
		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
static int xscale_read_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
	struct xscale_common *xscale = target_to_xscale(target);
	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_COMMAND_SYNTAX_ERROR;
	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;
	/* send memory read request (command 0x1n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
	/* send base address for read request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
	/* send number of requested data words */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
	/* receive data from target (count times 32-bit words in host endianness) */
	buf32 = malloc(4 * count);
	if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
	/* extract data from host-endian buffer into byte stream */
	for (i = 0; i < count; i++)
				target_buffer_set_u32(target, buffer, buf32[i]);
				target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
				*buffer++ = buf32[i] & 0xff;
				LOG_ERROR("invalid read size");
				return ERROR_COMMAND_SYNTAX_ERROR;
	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
		return ERROR_TARGET_DATA_ABORT;
static int xscale_read_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
	struct xscale_common *xscale = target_to_xscale(target);
	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);
	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
		target_name(target), __func__);

static int xscale_write_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
	struct xscale_common *xscale = target_to_xscale(target);
	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_COMMAND_SYNTAX_ERROR;
	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;
	/* send memory write request (command 0x2n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
	/* send base address for read request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
	/* send number of requested data words to be written */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
	/* extract data from host-endian buffer into byte stream */
	for (i = 0; i < count; i++)
				value = target_buffer_get_u32(target, buffer);
				xscale_send_u32(target, value);
				value = target_buffer_get_u16(target, buffer);
				xscale_send_u32(target, value);
				xscale_send_u32(target, value);
				LOG_ERROR("should never get here");
	if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
		LOG_ERROR("data abort writing memory");
		return ERROR_TARGET_DATA_ABORT;
static int xscale_write_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
	struct xscale_common *xscale = target_to_xscale(target);
	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_write_memory(target, address, size, count, buffer);
	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
		target_name(target), __func__);

static int xscale_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, const uint8_t *buffer)
	return xscale_write_memory(target, address, 4, count, buffer);

static int xscale_get_ttb(struct target *target, uint32_t *result)
	struct xscale_common *xscale = target_to_xscale(target);
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
	if (retval != ERROR_OK)
	ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
		cp15_control &= ~0x1U;
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
		cp15_control &= ~0x4U;
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
		cp15_control &= ~0x1000U;
	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);

static int xscale_enable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
		cp15_control |= 0x1U;
		cp15_control |= 0x4U;
		cp15_control |= 0x1000U;
	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
	struct xscale_common *xscale = target_to_xscale(target);
	if (target->state != TARGET_HALTED)
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	if (breakpoint->set)
		LOG_WARNING("breakpoint already set");
	if (breakpoint->type == BKPT_HARD)
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		else if (!xscale->ibcr1_used)
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		{	/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	else if (breakpoint->type == BKPT_SOFT)
		if (breakpoint->length == 4)
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
		breakpoint->set = 1;
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */

static int xscale_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
	struct xscale_common *xscale = target_to_xscale(target);
	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
		LOG_ERROR("no breakpoint unit available for hardware breakpoint");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	if ((breakpoint->length != 2) && (breakpoint->length != 4))
		LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available--;
	return xscale_set_breakpoint(target, breakpoint);
static int xscale_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		if (breakpoint->set == 1)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
			xscale->ibcr0_used = 0;
		}
		else if (breakpoint->set == 2)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
			xscale->ibcr1_used = 0;
		}
		breakpoint->set = 0;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 0;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		xscale_unset_breakpoint(target, breakpoint);
	}

	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available++;

	return ERROR_OK;
}
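
/* Note on the DBCON encoding assumed below (and visible in the bit masks used):
 * the enable field for DBR0 lives in DBCON[1:0] and the one for DBR1 in
 * DBCON[3:2] (hence the "enable << 2"), while DBCON[8] ("M") turns DBR1 from a
 * second address comparator into an address mask for DBR0, which is how
 * watchpoints longer than one word are implemented.
 */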
static int xscale_set_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	switch (watchpoint->rw)
	{
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
	}

	/* For a watchpoint across more than one word, both DBR registers must
	   be enlisted, with the second used as a mask. */
	if (watchpoint->length > 4)
	{
		if (xscale->dbr0_used || xscale->dbr1_used)
		{
			LOG_ERROR("BUG: sufficient hardware comparators unavailable");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		/* Write mask value to DBR1, based on the length argument.
		 * Address bits ignored by the comparator are those set in mask. */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
					watchpoint->length - 1);
		xscale->dbr1_used = 1;
		enable |= 0x100;		/* DBCON[M] */
	}

	if (!xscale->dbr0_used)
	{
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	}
	else if (!xscale->dbr1_used)
	{
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	}
	else
	{
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return ERROR_OK;
}
static int xscale_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (xscale->dbr_available < 1)
	{
		LOG_ERROR("no more watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->value)
		LOG_WARNING("xscale does not support value, mask arguments; ignoring");

	/* check that length is a power of two */
	for (uint32_t len = watchpoint->length; len != 1; len /= 2)
	{
		if ((len % 2) != 0)
		{
			LOG_ERROR("xscale requires that watchpoint length is a power of two");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	if (watchpoint->length == 4)	/* single word watchpoint */
	{
		xscale->dbr_available--;	/* one DBR reg used */
		return ERROR_OK;
	}

	/* watchpoints across multiple words require both DBR registers */
	if (xscale->dbr_available < 2)
	{
		LOG_ERROR("insufficient watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->length > watchpoint->address)
	{
		LOG_ERROR("xscale does not support watchpoints with length "
			"greater than address");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	xscale->dbr_available = 0;
	return ERROR_OK;
}
static int xscale_unset_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!watchpoint->set)
	{
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	if (watchpoint->set == 1)
	{
		if (watchpoint->length > 4)
		{
			dbcon_value &= ~0x103;	/* clear DBCON[M] as well */
			xscale->dbr1_used = 0;	/* DBR1 was used for mask */
		}
		else
			dbcon_value &= ~0x3;

		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr0_used = 0;
	}
	else if (watchpoint->set == 2)
	{
		dbcon_value &= ~0xc;
		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr1_used = 0;
	}
	watchpoint->set = 0;

	return ERROR_OK;
}
static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->set)
	{
		xscale_unset_watchpoint(target, watchpoint);
	}

	if (watchpoint->length > 4)
		xscale->dbr_available++;	/* both DBR regs now available */

	xscale->dbr_available++;

	return ERROR_OK;
}
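
/* Register access helpers.  DCSR, TX and RX are reachable directly over JTAG;
 * all other XScale debug registers are proxied through the debug handler using
 * a simple message protocol visible below: command 0x40 (read) or 0x41 (write),
 * followed by the handler's register number and, for writes, the new value.
 */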
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transferred by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}
static int xscale_set_reg(struct reg *reg, uint8_t *buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transferred by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	struct xscale_reg *dcsr_arch_info = dcsr->arch_info;

	/* send CP write request (command 0x41) */
	xscale_send_u32(target, 0x41);

	/* send CP register number */
	xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

	/* send CP register value */
	xscale_send_u32(target, value);
	buf_set_u32(dcsr->value, 0, 32, value);

	return ERROR_OK;
}
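
/* Trace buffer handling.  The debug handler returns 258 words: 256 one-byte
 * trace entries plus the two checkpoint registers.  Each entry is either a
 * "message byte" (upper nybble = message type, lower nybble = instruction
 * count) or one of the four address bytes that precede an indirect-branch
 * message; the parsing below relies on that layout.
 */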
static int xscale_read_trace(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct xscale_trace_data **trace_data_p;

	/* 258 words from debug handler
	 * 256 trace buffer entries
	 * 2 checkpoint addresses
	 */
	uint32_t trace_buffer[258];
	int is_address[256];
	int i, j;
	unsigned int num_checkpoints = 0;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target must be stopped to read trace data");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* send read trace buffer command (command 0x61) */
	xscale_send_u32(target, 0x61);

	/* receive trace buffer content */
	xscale_receive(target, trace_buffer, 258);

	/* parse buffer backwards to identify address entries */
	for (i = 255; i >= 0; i--)
	{
		/* also count number of checkpointed entries */
		if ((trace_buffer[i] & 0xe0) == 0xc0)
			num_checkpoints++;

		is_address[i] = 0;
		if (((trace_buffer[i] & 0xf0) == 0x90) ||
			((trace_buffer[i] & 0xf0) == 0xd0))
		{
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
		}
	}

	/* search first non-zero entry that is not part of an address */
	for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
		;

	if (j == 256)
	{
		LOG_DEBUG("no trace data collected");
		return ERROR_XSCALE_NO_TRACE_DATA;
	}

	/* account for possible partial address at buffer start (wrap mode only) */
	if (is_address[0])
	{	/* first entry is address; complete set of 4? */
		i = 1;
		while (i < 4)
			if (!is_address[i++])
				break;
		if (i < 4)
			j += i;		/* partial address; can't use it */
	}

	/* if first valid entry is indirect branch, can't use that either (no address) */
	if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
		j++;

	/* walk linked list to terminating entry */
	for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
		;

	*trace_data_p = malloc(sizeof(struct xscale_trace_data));
	(*trace_data_p)->next = NULL;
	(*trace_data_p)->chkpt0 = trace_buffer[256];
	(*trace_data_p)->chkpt1 = trace_buffer[257];
	(*trace_data_p)->last_instruction =
		buf_get_u32(armv4_5->pc->value, 0, 32);
	(*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
	(*trace_data_p)->depth = 256 - j;
	(*trace_data_p)->num_checkpoints = num_checkpoints;

	for (i = j; i < 256; i++)
	{
		(*trace_data_p)->entries[i - j].data = trace_buffer[i];
		if (is_address[i])
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
		else
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
	}

	return ERROR_OK;
}
static int xscale_read_instruction(struct target *target, uint32_t pc,
		struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	}
	else
	{
		LOG_ERROR("BUG: unknown core state encountered");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
/* Extract address encoded into trace data.
 * Write result to address referenced by argument 'target', or 0 if incomplete. */
static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
		int i, uint32_t *target)
{
	/* if there are less than four entries prior to the indirect branch message
	 * we can't extract the address */
	if (i < 4)
		*target = 0;
	else
	{
		*target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
			(trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
	}
}
static inline void xscale_display_instruction(struct target *target, uint32_t pc,
		struct arm_instruction *instruction,
		struct command_context *cmd_ctx)
{
	int retval = xscale_read_instruction(target, pc, instruction);
	if (retval == ERROR_OK)
		command_print(cmd_ctx, "%s", instruction->text);
	else
		command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
}
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;	/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data)
	{
		int chkpt = 0;	/* incremented as checkpointed entries found */

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++)
		{
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count, j;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type)
			{
				case 0:		/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:		/* Direct Branch */
					break;

				case 9:		/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15:	/* Roll-over */
					break;

				default:	/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;
			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0)
			{
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++)
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception)
			{
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12))
			{
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12)
				{
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with address from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13))
			{
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		} /* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	} /* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so the last few are typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;	/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16)
	{
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;	/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++)
	{
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
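
/* The XScale-specific registers get their own reg_cache, chained after the
 * standard ARM core register cache built by arm_build_reg_cache(); every entry
 * is backed by a 4-byte value buffer and routed through xscale_get_reg() and
 * xscale_set_reg() above.
 */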
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, armv4_5);

	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;
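
	/* Vector 0 (reset) is pointed at the debug handler entry (handler_address
	 * + 0x20); the remaining local vectors default to ARMV4_5_B(0xfffffe, 0),
	 * i.e. a branch-to-self, so unexpected exceptions spin until the debugger
	 * takes over.  The "- 0x8" in the offset computation accounts for the ARM
	 * pipeline when encoding the branch. */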
	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
static int xscale_target_create(struct target *target, Jim_Interp *interp)
{
	struct xscale_common *xscale;

	if (sizeof xscale_debug_handler - 1 > 0x800) {
		LOG_ERROR("debug_handler.bin: larger than 2kb");
		return ERROR_FAIL;
	}

	xscale = calloc(1, sizeof(*xscale));
	if (!xscale)
		return ERROR_FAIL;

	return xscale_init_arch_info(target, xscale, target->tap,
			target->variant);
}
COMMAND_HANDLER(xscale_handle_debug_handler_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t handler_address;

	if (CMD_ARGC < 2)
	{
		LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	if ((target = get_target(CMD_ARGV[0])) == NULL)
	{
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);

	if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
		((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
	{
		xscale->handler_address = handler_address;
	}
	else
	{
		LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t cache_clean_address;

	if (CMD_ARGC < 2)
	{
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	target = get_target(CMD_ARGV[0]);
	if (target == NULL)
	{
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}
	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);

	if (cache_clean_address & 0xffff)
	{
		LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
	}
	else
	{
		xscale->cache_clean_address = cache_clean_address;
	}

	return ERROR_OK;
}
)
3285 struct target
*target
= get_current_target(CMD_CTX
);
3286 struct xscale_common
*xscale
= target_to_xscale(target
);
3289 retval
= xscale_verify_pointer(CMD_CTX
, xscale
);
3290 if (retval
!= ERROR_OK
)
3293 return armv4_5_handle_cache_info_command(CMD_CTX
, &xscale
->armv4_5_mmu
.armv4_5_cache
);
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cb;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	uint32_t ret;
	int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
			virtual, &cb, &ret);
	if (retval != ERROR_OK)
		return retval;
	*physical = ret;
	return ERROR_OK;
}
static int xscale_mmu(struct target *target, int *enabled)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_INVALID;
	}
	*enabled = xscale->armv4_5_mmu.mmu_enabled;
	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_mmu_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (enable)
			xscale_enable_mmu_caches(target, 1, 0, 0);
		else
			xscale_disable_mmu_caches(target, 1, 0, 0);
		xscale->armv4_5_mmu.mmu_enabled = enable;
	}

	command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_idcache_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);

	int retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	bool icache = false;
	if (strcmp(CMD_NAME, "icache") == 0)
		icache = true;

	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (icache)
		{
			xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 0, 1);
			else
				xscale_disable_mmu_caches(target, 0, 0, 1);
		}
		else
		{
			xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 1, 0);
			else
				xscale_disable_mmu_caches(target, 0, 1, 0);
		}
	}

	bool enabled = icache ?
		xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
		xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
	const char *msg = enabled ? "enabled" : "disabled";
	command_print(CMD_CTX, "%s %s", CMD_NAME, msg);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
	}
	else
	{
		COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
		xscale_write_dcsr(target, -1, -1);
	}

	command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0)	/* print current settings */
	{
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX, "low  %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else
	{
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0)
		{
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		}
		else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
		{
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		}
		else
			err = 1;
	}

	if (err)
		command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1)
	{
		if (strcmp("enable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;	/* default */
		else if (strcmp("disable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
		else
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if (strcmp("fill", CMD_ARGV[1]) == 0)
		{
			int buffcount = 1;		/* default */
			if (CMD_ARGC >= 3)
				COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
			if (buffcount < 1)		/* invalid */
			{
				command_print(CMD_CTX, "fill buffer count must be > 0");
				xscale->trace.mode = XSCALE_TRACE_DISABLED;
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
			xscale->trace.buffer_fill = buffcount;
			xscale->trace.mode = XSCALE_TRACE_FILL;
		}
		else if (strcmp("wrap", CMD_ARGV[1]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;
		else
		{
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
	}

	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		char fill_string[12];
		sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
		command_print(CMD_CTX, "trace buffer enabled (%s)",
			(xscale->trace.mode == XSCALE_TRACE_FILL)
				? fill_string : "wrap");
	}
	else
		command_print(CMD_CTX, "trace buffer disabled");

	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.mode == XSCALE_TRACE_FILL)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_trace_image_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
		return ERROR_OK;
	}

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (xscale->trace.image)
	{
		image_close(xscale->trace.image);
		free(xscale->trace.image);
		command_print(CMD_CTX, "previously loaded image found and closed");
	}

	xscale->trace.image = malloc(sizeof(struct image));
	xscale->trace.image->base_address_set = 0;
	xscale->trace.image->start_address_set = 0;

	/* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2)
	{
		xscale->trace.image->base_address_set = 1;
		COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
	}
	else
	{
		xscale->trace.image->base_address_set = 0;
	}

	if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
	{
		free(xscale->trace.image);
		xscale->trace.image = NULL;
		return ERROR_OK;
	}

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale dump_trace <file>");
		return ERROR_OK;
	}

	trace_data = xscale->trace.data;

	if (!trace_data)
	{
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
	{
		return ERROR_OK;
	}

	while (trace_data)
	{
		int i;

		fileio_write_u32(&file, trace_data->chkpt0);
		fileio_write_u32(&file, trace_data->chkpt1);
		fileio_write_u32(&file, trace_data->last_instruction);
		fileio_write_u32(&file, trace_data->depth);

		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(&file);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	xscale_analyze_trace(target, CMD_CTX);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from xscale cp15 register no to openocd register */
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];
	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read cp15 register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,