/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                              *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2009 Michael Schwingen                                  *
 *   michael@schwingen.org                                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/
#include "breakpoints.h"
#include "xscale.h"
#include "target_type.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include <helper/time_support.h>
#include "arm_opcodes.h"
/*
 * Important XScale documents available as of October 2009 include:
 *
 * Intel XScale® Core Developer's Manual, January 2004
 *		Order Number: 273473-002
 *		This has a chapter detailing debug facilities, and punts some
 *		details to chip-specific microarchitecture documents.
 *
 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
 *		Document Number: 273539-005
 *		Less detailed than the developer's manual, but summarizes those
 *		missing details (for most XScales) and gives LOTS of notes about
 *		debugger/handler interaction issues.  Presents a simpler reset
 *		and load-handler sequence than the arch doc.  (Note, OpenOCD
 *		doesn't currently support "Hot-Debug" as defined there.)
 *
 * Chip-specific microarchitecture documents may also be useful.
 */
/* forward declarations */
static int xscale_resume(struct target *, int current,
		uint32_t address, int handle_breakpoints, int debug_execution);
static int xscale_debug_entry(struct target *);
static int xscale_restore_banked(struct target *);
static int xscale_get_reg(struct reg *reg);
static int xscale_set_reg(struct reg *reg, uint8_t *buf);
static int xscale_set_breakpoint(struct target *, struct breakpoint *);
static int xscale_set_watchpoint(struct target *, struct watchpoint *);
static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
static int xscale_read_trace(struct target *);
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * FIXME  the OpenOCD "bin2char" utility currently doesn't handle
 * binary files cleanly.  It's string oriented, and terminates them
 * with a NUL character.  Better would be to generate the constants
 * and let other code decide names, scoping, and other housekeeping.
 */
static /* unsigned const char xscale_debug_handler[] = ... */
#include "xscale_debug.h"
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_RX",			/* 20 */
};
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
/* convenience wrapper to access XScale specific registers */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t buf[4];

	buf_set_u32(buf, 0, 32, value);

	return xscale_set_reg(reg, buf);
}
static const char xscale_not[] = "target is not an XScale";

static int xscale_verify_pointer(struct command_context *cmd_ctx,
		struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
	assert(tap != NULL);

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(scratch, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}
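
/* The DCSR, DBGTX and DBGRX JTAG data registers scanned by the routines
 * below share one layout: a 3-bit status/control field, a 32-bit data word,
 * and a single trailing control bit.  The check_value/check_mask pairs
 * verify the expected status bits on readback.  (Summary taken from the
 * field setup in the functions that follow.)
 */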
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here.  It makes sure that the arguments
	 * to jtag_set_check_value() do not go out of scope! */
	return jtag_execute_queue();
}
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
{
	if (num_words == 0)
		return ERROR_INVALID_ARGUMENTS;

	struct xscale_common *xscale = target_to_xscale(target);
	int retval = ERROR_OK;
	tap_state_t path[3];
	struct scan_field fields[3];
	uint8_t *field0 = malloc(num_words * 1);
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint32_t *field1 = malloc(num_words * 4);
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	int words_done = 0;
	int words_scheduled = 0;
	int attempts = 0;
	int i;

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].check_value = &field0_check_value;
	fields[0].check_mask = &field0_check_mask;

	fields[1].num_bits = 32;

	fields[2].num_bits = 1;
	fields[2].check_value = &field2_check_value;
	fields[2].check_mask = &field2_check_mask;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);
	jtag_add_runtest(1, TAP_IDLE);	/* ensures that we're in the TAP_IDLE state as the above could be a no-op */

	/* repeat until all words have been collected */
	while (words_done < num_words)
	{
		/* schedule reads */
		words_scheduled = 0;
		for (i = words_done; i < num_words; i++)
		{
			fields[0].in_value = &field0[i];

			jtag_add_pathmove(3, path);

			fields[1].in_value = (uint8_t *)(field1 + i);

			jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);

			jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));

			words_scheduled++;
		}

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while receiving data from debug handler");
			break;
		}

		/* examine results */
		for (i = words_done; i < num_words; i++)
		{
			if (!(field0[0] & 1))
			{
				/* move backwards if necessary */
				int j;
				for (j = i; j < num_words - 1; j++)
				{
					field0[j] = field0[j + 1];
					field1[j] = field1[j + 1];
				}
				words_scheduled--;
			}
		}

		if (words_scheduled == 0)
		{
			if (attempts++ == 1000)
			{
				LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}

		words_done += words_scheduled;
	}

	for (i = 0; i < num_words; i++)
		*(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);

	free(field1);
	free(field0);

	return retval;
}
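
/* Bit 0 of the 3-bit status field read back through the DBGTX register acts
 * as a TX-ready flag: the debug handler has placed a word in TX when it is
 * set.  xscale_read_tx() below can either consume that word (Capture-DR
 * straight to Shift-DR) or peek at it without clearing the ready flag by
 * detouring through Exit1-DR/Pause-DR, as selected by "consume".
 */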
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR;
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!((!(field0_in & 1)) && consume))
			break;

		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		}
	}

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!(field0_in & 1))
			break;

		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		}
	}

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
/* send count elements of size bytes to the debug handler */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	/* RX data register layout assumed from xscale_write_rx():
	 * 3-bit status field, 32-bit data word, 1-bit rx_valid flag */
	int bits[3] = { 3, 32, 1 };
	uint32_t t[3];
	t[0] = 0;
	t[2] = 1;

	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t[1] = le_to_h_u32(buffer);
				else
					t[1] = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t[1] = le_to_h_u16(buffer);
				else
					t[1] = be_to_h_u16(buffer);
				break;
			case 1:
				t[1] = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_INVALID_ARGUMENTS;
		}

		jtag_add_dr_out(target->tap,
				3, bits, t,
				TAP_IDLE);

		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
static int xscale_send_u32(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);

	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
	return xscale_write_rx(target);
}
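
/* Commands sent to the debug handler through RX, as used throughout this
 * file (collected here for reference):
 *   0x0 / 0x1            get / send banked registers for a given mode
 *   0x10|size, 0x20|size  memory read / write (size = 1, 2 or 4)
 *   0x30 / 0x31          resume, without / with trace buffer enabled
 *   0x50 .. 0x53         clean dcache, invalidate dcache, invalidate icache, cpwait
 *   0x60                 clear the DCSR sticky abort bit
 *   0x62                 clean the trace buffer
 */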
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
/* parity of the number of set bits: 0 if even, 1 if odd, for 32-bit words */
static unsigned int parity(unsigned int v)
{
	// unsigned int ov = v;
	/* fold the word onto itself until only a nibble remains, then use
	 * 0x6996 as a 16-entry parity lookup table indexed by that nibble */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	// LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
	return (0x6996 >> v) & 1;
}
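
/* Mini-ICache lines are 32 bytes (8 instruction words); the LDIC JTAG
 * instruction therefore takes the line's virtual address shifted right by
 * five bits (27 address bits), followed by the eight words, each scanned in
 * together with its parity bit.
 */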
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
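
/* Exception vector handling: the low (0x0) and high (0xffff0000) vector
 * tables are mirrored in xscale->low_vectors[] / high_vectors[] and loaded
 * into the mini-ICache, with the two reset vector entries replaced by a
 * branch into the debug handler so that a processor reset traps into the
 * debugger.
 */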
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++)
	{
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
		{
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++)
	{
		if (xscale->static_low_vectors_set & (1 << i))
		{
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler; the ARM B encoding takes a word
	 * offset relative to PC+8, hence the "- 0x8" and ">> 2" */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;

	static const char *state[] =
	{
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
			state[xscale->armv4_5_mmu.mmu_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
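
/* Polling: while the target runs, periodically peek at the TX register
 * without consuming it.  Data pending in TX means the debug handler is
 * running, i.e. the core has entered debug state.
 */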
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{
			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume),
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted,
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	uint32_t moe;
	int i;
	int retval;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr)
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested  */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
		{
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		}
		else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
static int xscale_halt(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	if (target->state == TARGET_HALTED)
	{
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	}
	else if (target->state == TARGET_UNKNOWN)
	{
		/* this must not happen for an xscale target */
		LOG_ERROR("target was in unknown state when halt was requested");
		return ERROR_TARGET_INVALID;
	}
	else if (target->state == TARGET_RESET)
	{
		LOG_DEBUG("target->state == TARGET_RESET");
	}
	else
	{
		/* assert external dbg break */
		xscale->external_debug_break = 1;
		xscale_read_dcsr(target);

		target->debug_reason = DBG_REASON_DBGRQ;
	}

	return ERROR_OK;
}
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used)
	{
		struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
		{
			xscale_unset_breakpoint(target, ibcr0_bp);
		}
		else
		{
			LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
		}
	}

	if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
		return retval;

	return ERROR_OK;
}
static int xscale_disable_single_step(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
		return retval;

	return ERROR_OK;
}
static void xscale_enable_watchpoints(struct target *target)
{
	struct watchpoint *watchpoint = target->watchpoints;

	while (watchpoint)
	{
		if (watchpoint->set == 0)
			xscale_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;
	}
}
static void xscale_enable_breakpoints(struct target *target)
{
	struct breakpoint *breakpoint = target->breakpoints;

	/* set any pending breakpoints */
	while (breakpoint)
	{
		if (breakpoint->set == 0)
			xscale_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;
	}
}
static void xscale_free_trace_data(struct xscale_common *xscale)
{
	struct xscale_trace_data *td = xscale->trace.data;
	while (td)
	{
		struct xscale_trace_data *next_td = td->next;
		free(td);
		td = next_td;
	}
	xscale->trace.data = NULL;
}
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;
			enum trace_mode saved_trace_mode;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));

			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if (xscale->trace.mode == XSCALE_TRACE_FILL)
		{
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0)
			{
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
			}
		}
		else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);

		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	{
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
static int xscale_step(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct arm *armv4_5 = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;

	uint32_t current_pc;
	int retval;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0)
	{
		if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
	if (breakpoint != NULL) {
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	if (breakpoint)
	{
		xscale_set_breakpoint(target, breakpoint);
	}

	LOG_DEBUG("target stepped");

	return ERROR_OK;
}
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG)
	 */
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too.  Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT:  *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated.  Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache.  Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_FAIL;
}

static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_FAIL;
}
static int xscale_full_context(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);
	uint32_t *buffer;
	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	buffer = malloc(4 * 8);

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS);
	 * we can't enter User mode on an XScale (unpredictable),
	 * but User shares registers with SYS
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		bool valid = true;
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are invalid registers in the current mode
		 */
		for (j = 0; valid && j <= 16; j++)
		{
			if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).valid)
				valid = false;
		}
		if (valid)
			continue;

		/* request banked registers */
		xscale_send_u32(target, 0x0);

		/* send CPSR for desired bank mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* get banked registers:  r8 to r14, and SPSR
		 * except in USR/SYS mode
		 */
		if (mode != ARM_MODE_SYS) {
			/* SPSR */
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);

			xscale_receive(target, buffer, 8);

			buf_set_u32(r->value, 0, 32, buffer[7]);
			r->dirty = false;
			r->valid = true;
		} else {
			xscale_receive(target, buffer, 7);
		}

		/* move data from buffer to register cache */
		for (j = 8; j <= 14; j++)
		{
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			buf_set_u32(r->value, 0, 32, buffer[j - 8]);
			r->dirty = false;
			r->valid = true;
		}
	}

	free(buffer);

	return ERROR_OK;
}
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);
	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written.  Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1:  "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
		}
	}

	return ERROR_OK;
}
static int xscale_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t *buf32;
	uint32_t i;
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_INVALID_ARGUMENTS;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory read request (command 0x1n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
		return retval;

	/* send base address for read request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
		return retval;

	/* send number of requested data words */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
		return retval;

	/* receive data from target (count times 32-bit words in host endianness) */
	buf32 = malloc(4 * count);
	if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
		return retval;

	/* extract data from host-endian buffer into byte stream */
	for (i = 0; i < count; i++)
	{
		switch (size)
		{
			case 4:
				target_buffer_set_u32(target, buffer, buf32[i]);
				buffer += 4;
				break;
			case 2:
				target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
				buffer += 2;
				break;
			case 1:
				*buffer++ = buf32[i] & 0xff;
				break;
			default:
				LOG_ERROR("invalid read size");
				return ERROR_INVALID_ARGUMENTS;
		}
	}

	free(buf32);

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
	{
		/* clear SA bit */
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
			return retval;

		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
static int xscale_write_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_INVALID_ARGUMENTS;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory write request (command 0x2n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
		return retval;

	/* send base address for write request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
		return retval;

	/* send number of requested data words to be written */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
		return retval;

	/* extract data from host-endian buffer into byte stream */
#if 0
	{
		uint32_t value;
		uint32_t i;
		for (i = 0; i < count; i++)
		{
			switch (size)
			{
				case 4:
					value = target_buffer_get_u32(target, buffer);
					xscale_send_u32(target, value);
					buffer += 4;
					break;
				case 2:
					value = target_buffer_get_u16(target, buffer);
					xscale_send_u32(target, value);
					buffer += 2;
					break;
				case 1:
					value = *buffer;
					xscale_send_u32(target, value);
					buffer += 1;
					break;
				default:
					LOG_ERROR("should never get here");
					break;
			}
		}
	}
#endif
	if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
		return retval;

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;
	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
	{
		/* clear SA bit */
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
			return retval;

		LOG_ERROR("data abort writing memory");
		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
static int xscale_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_write_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
static int xscale_get_ttb(struct target *target, uint32_t *result)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t ttb;
	int retval;

	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
	if (retval != ERROR_OK)
		return retval;
	ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);

	*result = ttb;

	return ERROR_OK;
}
static int xscale_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
	{
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache)
	{
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
static int xscale_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control |= 0x1U;

	if (d_u_cache)
		cp15_control |= 0x4U;

	if (i_cache)
		cp15_control |= 0x1000U;

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{	/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
				return retval;
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
			if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
static int xscale_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
	{
		LOG_ERROR("no breakpoint unit available for hardware breakpoint");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if ((breakpoint->length != 2) && (breakpoint->length != 4))
	{
		LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		xscale->ibcr_available--;
	}

	return xscale_set_breakpoint(target, breakpoint);
}
static int xscale_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		if (breakpoint->set == 1)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
			xscale->ibcr0_used = 0;
		}
		else if (breakpoint->set == 2)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
			xscale->ibcr1_used = 0;
		}
		breakpoint->set = 0;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
				return retval;
		}
		else
		{
			if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
				return retval;
		}
		breakpoint->set = 0;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2303 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2305 struct xscale_common *xscale = target_to_xscale(target);
2307 if (target->state != TARGET_HALTED)
2309 LOG_ERROR("target not halted");
2310 return ERROR_TARGET_NOT_HALTED;
2313 if (breakpoint->set)
2315 xscale_unset_breakpoint(target, breakpoint);
2318 if (breakpoint->type == BKPT_HARD)
2319 xscale->ibcr_available++;
2324 static int xscale_set_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2327 struct xscale_common *xscale = target_to_xscale(target);
2328 uint32_t enable = 0;
2329 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2330 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2332 if (target->state != TARGET_HALTED)
2334 LOG_ERROR("target not halted");
2335 return ERROR_TARGET_NOT_HALTED;
2338 switch (watchpoint->rw)
2350 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2353 /* For a watchpoint spanning more than one word, both DBR registers must
2354 be enlisted, with the second used as a mask. */
2355 if (watchpoint->length > 4)
2357 if (xscale->dbr0_used || xscale->dbr1_used)
2359 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2360 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2363 /* Write mask value to DBR1, based on the length argument.
2364 * Address bits ignored by the comparator are those set in mask. */
2365 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2366 watchpoint->length - 1);
2367 xscale->dbr1_used = 1;
2368 enable |= 0x100; /* DBCON[M] */
2371 if (!xscale->dbr0_used)
2373 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2374 dbcon_value |= enable;
2375 xscale_set_reg_u32(dbcon, dbcon_value);
2376 watchpoint->set = 1;
2377 xscale->dbr0_used = 1;
2379 else if (!xscale->dbr1_used)
2381 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2382 dbcon_value |= enable << 2;
2383 xscale_set_reg_u32(dbcon, dbcon_value);
2384 watchpoint->set = 2;
2385 xscale->dbr1_used = 1;
2389 LOG_ERROR("BUG: no hardware comparator available");
2390 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
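/* Note: DBCON is the control register shared by both data breakpoint
 * registers. The 'enable' value derived from watchpoint->rw lands in the
 * low DBCON bits for DBR0 and, shifted left by two, in the next pair for
 * DBR1; setting DBCON[M] (0x100) turns DBR1 into an address mask for DBR0,
 * which is how watchpoints longer than one word are implemented above. */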
2396 static int xscale_add_watchpoint(struct target *target,
2397 struct watchpoint *watchpoint)
2399 struct xscale_common *xscale = target_to_xscale(target);
2401 if (xscale->dbr_available < 1)
2403 LOG_ERROR("no more watchpoint registers available");
2404 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2407 if (watchpoint->value)
2408 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2410 /* check that length is a power of two */
2411 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2415 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2416 return ERROR_COMMAND_ARGUMENT_INVALID;
2420 if (watchpoint->length == 4) /* single word watchpoint */
2422 xscale->dbr_available--; /* one DBR reg used */
2426 /* watchpoints across multiple words require both DBR registers */
2427 if (xscale->dbr_available < 2)
2429 LOG_ERROR("insufficient watchpoint registers available");
2430 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2433 if (watchpoint->length > watchpoint->address)
2435 LOG_ERROR("xscale does not support watchpoints with length "
2436 "greater than address");
2437 return ERROR_COMMAND_ARGUMENT_INVALID;
2440 xscale->dbr_available = 0;
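/* Note: because the mask written to DBR1 is (length - 1), a multi-word
 * watchpoint behaves as expected only when its length is a power of two
 * and the address is aligned to that length -- e.g. address 0x8000 with
 * length 0x100 watches 0x8000..0x80ff. The power-of-two check above
 * enforces the first condition; the length > address test rejects one
 * obvious class of invalid requests. */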
2444 static int xscale_unset_watchpoint(struct target *target,
2445 struct watchpoint *watchpoint)
2447 struct xscale_common *xscale = target_to_xscale(target);
2448 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2449 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2451 if (target->state != TARGET_HALTED)
2453 LOG_WARNING("target not halted");
2454 return ERROR_TARGET_NOT_HALTED;
2457 if (!watchpoint->set)
2459 LOG_WARNING("watchpoint not set");
2463 if (watchpoint->set == 1)
2465 if (watchpoint->length > 4)
2467 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2468 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2471 dbcon_value &= ~0x3;
2473 xscale_set_reg_u32(dbcon, dbcon_value);
2474 xscale->dbr0_used = 0;
2476 else if (watchpoint->set == 2)
2478 dbcon_value &= ~0xc;
2479 xscale_set_reg_u32(dbcon, dbcon_value);
2480 xscale->dbr1_used = 0;
2482 watchpoint->set = 0;
2487 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2489 struct xscale_common *xscale = target_to_xscale(target);
2491 if (target->state != TARGET_HALTED)
2493 LOG_ERROR("target not halted");
2494 return ERROR_TARGET_NOT_HALTED;
2497 if (watchpoint->set)
2499 xscale_unset_watchpoint(target, watchpoint);
2502 if (watchpoint->length > 4)
2503 xscale->dbr_available++; /* both DBR regs now available */
2505 xscale->dbr_available++;
2510 static int xscale_get_reg(struct reg *reg)
2512 struct xscale_reg *arch_info = reg->arch_info;
2513 struct target *target = arch_info->target;
2514 struct xscale_common *xscale = target_to_xscale(target);
2516 /* DCSR, TX and RX are accessible via JTAG */
2517 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2519 return xscale_read_dcsr(arch_info->target);
2521 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2523 /* 1 = consume register content */
2524 return xscale_read_tx(arch_info->target, 1);
2526 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2528 /* can't read from RX register (host -> debug handler) */
2531 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2533 /* can't (explicitly) read from TXRXCTRL register */
2536 else /* Other DBG registers have to be transferred by the debug handler */
2538 /* send CP read request (command 0x40) */
2539 xscale_send_u32(target, 0x40);
2541 /* send CP register number */
2542 xscale_send_u32(target, arch_info->dbg_handler_number);
2544 /* read register value */
2545 xscale_read_tx(target, 1);
2546 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
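/* Note: registers that are not directly reachable over JTAG are fetched
 * through the debug handler's comms protocol -- 0x40 is the "read CP
 * register" request, followed by the handler-specific register number; the
 * handler then returns the value through TX, which is copied into the
 * cached reg value above. */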
2555 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2557 struct xscale_reg *arch_info = reg->arch_info;
2558 struct target *target = arch_info->target;
2559 struct xscale_common *xscale = target_to_xscale(target);
2560 uint32_t value = buf_get_u32(buf, 0, 32);
2562 /* DCSR, TX and RX are accessible via JTAG */
2563 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2565 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2566 return xscale_write_dcsr(arch_info->target, -1, -1);
2568 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2570 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2571 return xscale_write_rx(arch_info->target);
2573 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2575 /* can't write to TX register (debug-handler -> host) */
2578 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2580 /* can't (explicitly) write to TXRXCTRL register */
2583 else /* Other DBG registers have to be transferred by the debug handler */
2585 /* send CP write request (command 0x41) */
2586 xscale_send_u32(target, 0x41);
2588 /* send CP register number */
2589 xscale_send_u32(target, arch_info->dbg_handler_number);
2591 /* send CP register value */
2592 xscale_send_u32(target, value);
2593 buf_set_u32(reg->value, 0, 32, value);
2599 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2601 struct xscale_common *xscale = target_to_xscale(target);
2602 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2603 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2605 /* send CP write request (command 0x41) */
2606 xscale_send_u32(target, 0x41);
2608 /* send CP register number */
2609 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2611 /* send CP register value */
2612 xscale_send_u32(target, value);
2613 buf_set_u32(dcsr->value, 0, 32, value);
2618 static int xscale_read_trace(struct target *target)
2620 struct xscale_common *xscale = target_to_xscale(target);
2621 struct arm *armv4_5 = &xscale->armv4_5_common;
2622 struct xscale_trace_data **trace_data_p;
2624 /* 258 words from debug handler
2625 * 256 trace buffer entries
2626 * 2 checkpoint addresses
2628 uint32_t trace_buffer[258];
2629 int is_address[256];
2631 unsigned int num_checkpoints = 0;
2633 if (target->state != TARGET_HALTED)
2635 LOG_WARNING("target must be stopped to read trace data");
2636 return ERROR_TARGET_NOT_HALTED;
2639 /* send read trace buffer command (command 0x61) */
2640 xscale_send_u32(target, 0x61);
2642 /* receive trace buffer content */
2643 xscale_receive(target, trace_buffer, 258);
2645 /* parse buffer backwards to identify address entries */
2646 for (i = 255; i >= 0; i--)
2648 /* also count number of checkpointed entries */
2649 if ((trace_buffer[i] & 0xe0) == 0xc0)
2653 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2654 ((trace_buffer[i] & 0xf0) == 0xd0))
2657 is_address[--i] = 1;
2659 is_address[--i] = 1;
2661 is_address[--i] = 1;
2663 is_address[--i] = 1;
2668 /* search first non-zero entry that is not part of an address */
2669 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2674 LOG_DEBUG("no trace data collected");
2675 return ERROR_XSCALE_NO_TRACE_DATA;
2678 /* account for possible partial address at buffer start (wrap mode only) */
2680 { /* first entry is address; complete set of 4? */
2683 if (!is_address[i++])
2686 j += i; /* partial address; can't use it */
2689 /* if first valid entry is indirect branch, can't use that either (no address) */
2690 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2693 /* walk linked list to terminating entry */
2694 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2697 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2698 (*trace_data_p)->next = NULL;
2699 (*trace_data_p)->chkpt0 = trace_buffer[256];
2700 (*trace_data_p)->chkpt1 = trace_buffer[257];
2701 (*trace_data_p)->last_instruction =
2702 buf_get_u32(armv4_5->pc->value, 0, 32);
2703 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2704 (*trace_data_p)->depth = 256 - j;
2705 (*trace_data_p)->num_checkpoints = num_checkpoints;
2707 for (i = j; i < 256; i++)
2709 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2711 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2713 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
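/* Note: the 258 words received above are the 256 trace buffer entries
 * followed by the two checkpoint registers (trace_buffer[256] and [257]).
 * Entries whose top bits are 0b110 are checkpointed branch messages, and
 * indirect-branch messages (0x9x / 0xDx) are preceded in the buffer by the
 * four bytes of the branch target address, which is what the backwards
 * is_address[] scan marks. */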
2719 static int xscale_read_instruction(struct target *target, uint32_t pc,
2720 struct arm_instruction *instruction)
2722 struct xscale_common *const xscale = target_to_xscale(target);
2729 if (!xscale->trace.image)
2730 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2732 /* search for the section the current instruction belongs to */
2733 for (i = 0; i < xscale->trace.image->num_sections; i++)
2735 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2736 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2745 /* current instruction couldn't be found in the image */
2746 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2749 if (xscale->trace.core_state == ARM_STATE_ARM)
2752 if ((retval = image_read_section(xscale->trace.image, section,
2753 pc - xscale->trace.image->sections[section].base_address,
2754 4, buf, &size_read)) != ERROR_OK)
2756 LOG_ERROR("error while reading instruction");
2757 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2759 opcode = target_buffer_get_u32(target, buf);
2760 arm_evaluate_opcode(opcode, pc, instruction);
2762 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2765 if ((retval = image_read_section(xscale->trace.image, section,
2766 pc - xscale->trace.image->sections[section].base_address,
2767 2, buf, &size_read)) != ERROR_OK)
2769 LOG_ERROR("error while reading instruction");
2770 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2772 opcode = target_buffer_get_u16(target, buf);
2773 thumb_evaluate_opcode(opcode, pc, instruction);
2777 LOG_ERROR("BUG: unknown core state encountered");
2784 /* Extract address encoded into trace data.
2785 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2786 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2787 int i, uint32_t *target)
2789 /* if there are fewer than four entries prior to the indirect branch message
2790 * we can't extract the address */
2794 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2795 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2798 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2799 struct arm_instruction *instruction,
2800 struct command_context *cmd_ctx)
2802 int retval = xscale_read_instruction(target, pc, instruction);
2803 if (retval == ERROR_OK)
2804 command_print(cmd_ctx, "%s", instruction->text);
2806 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2809 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2811 struct xscale_common *xscale = target_to_xscale(target);
2812 struct xscale_trace_data *trace_data = xscale->trace.data;
2814 uint32_t breakpoint_pc;
2815 struct arm_instruction instruction;
2816 uint32_t current_pc = 0; /* initialized when address determined */
2818 if (!xscale->trace.image)
2819 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2821 /* loop for each trace buffer that was loaded from target */
2824 int chkpt = 0; /* incremented as checkpointed entries found */
2827 /* FIXME: set this to correct mode when trace buffer is first enabled */
2828 xscale->trace.core_state = ARM_STATE_ARM;
2830 /* loop for each entry in this trace buffer */
2831 for (i = 0; i < trace_data->depth; i++)
2834 uint32_t chkpt_reg = 0x0;
2835 uint32_t branch_target = 0;
2838 /* trace entry type is upper nybble of 'message byte' */
2839 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2841 /* Target addresses of indirect branches are written into buffer
2842 * before the message byte representing the branch. Skip past it */
2843 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2846 switch (trace_msg_type)
2848 case 0: /* Exceptions */
2856 exception = (trace_data->entries[i].data & 0x70) >> 4;
2858 /* FIXME: vector table may be at ffff0000 */
2859 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2862 case 8: /* Direct Branch */
2865 case 9: /* Indirect Branch */
2866 xscale_branch_address(trace_data, i, &branch_target);
2869 case 13: /* Checkpointed Indirect Branch */
2870 xscale_branch_address(trace_data, i, &branch_target);
2871 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2872 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2874 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2879 case 12: /* Checkpointed Direct Branch */
2880 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2881 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2883 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2885 /* if no current_pc, checkpoint will be starting point */
2886 if (current_pc == 0)
2887 branch_target = chkpt_reg;
2892 case 15: /* Roll-over */
2895 default: /* Reserved */
2896 LOG_WARNING("trace is suspect: invalid trace message byte");
2901 /* If we don't have the current_pc yet, but we did get the branch target
2902 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2903 * then we can start displaying instructions at the next iteration, with
2904 * branch_target as the starting point.
2906 if (current_pc == 0)
2908 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2912 /* We have current_pc. Read and display the instructions from the image.
2913 * First, display count instructions (lower nybble of message byte). */
2914 count = trace_data->entries[i].data & 0x0f;
2915 for (j = 0; j < count; j++)
2917 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2918 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2921 /* An additional instruction is implicitly added to count for
2922 * rollover and some exceptions: undef, swi, prefetch abort. */
2923 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2925 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2926 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2929 if (trace_msg_type == 15) /* rollover */
2934 command_print(cmd_ctx, "--- exception %i ---", exception);
2938 /* not exception or rollover; next instruction is a branch and is
2939 * not included in the count */
2940 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2942 /* for direct branches, extract branch destination from instruction */
2943 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2945 retval = xscale_read_instruction(target, current_pc, &instruction);
2946 if (retval == ERROR_OK)
2947 current_pc = instruction.info.b_bl_bx_blx.target_address;
2949 current_pc = 0; /* branch destination unknown */
2951 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2952 if (trace_msg_type == 12)
2954 if (current_pc == 0)
2955 current_pc = chkpt_reg;
2956 else if (current_pc != chkpt_reg) /* sanity check */
2957 LOG_WARNING("trace is suspect: checkpoint register "
2958 "inconsistent with address from image");
2961 if (current_pc == 0)
2962 command_print(cmd_ctx, "address unknown");
2967 /* indirect branch; the branch destination was read from trace buffer */
2968 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2970 current_pc = branch_target;
2972 /* sanity check (checkpoint reg is redundant) */
2973 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2974 LOG_WARNING("trace is suspect: checkpoint register "
2975 "inconsistent with address from trace buffer");
2978 } /* END: for (i = 0; i < trace_data->depth; i++) */
2980 breakpoint_pc = trace_data->last_instruction; /* used below */
2981 trace_data = trace_data->next;
2983 } /* END: while (trace_data) */
2985 /* Finally... display all instructions up to the value of the pc when the
2986 * debug break occurred (saved when trace data was collected from target).
2987 * This is necessary because the trace only records execution branches and 16
2988 * consecutive instructions (rollovers), so the last few are typically missed.
2990 if (current_pc == 0)
2991 return ERROR_OK; /* current_pc was never found */
2993 /* how many instructions remaining? */
2994 int gap_count = (breakpoint_pc - current_pc) /
2995 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2997 /* should never be negative or over 16, but verify */
2998 if (gap_count < 0 || gap_count > 16)
3000 LOG_WARNING("trace is suspect: excessive gap at end of trace");
3001 return ERROR_OK; /* bail; large number or negative value no good */
3004 /* display remaining instructions */
3005 for (i = 0; i < gap_count; i++)
3007 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
3008 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
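/* Note: each trace message byte is decoded above as type in the upper
 * nybble and an instruction count in the lower nybble: the counted
 * instructions are listed first, then the branch (or exception/rollover)
 * that ended the run, with the next PC recovered either from the image
 * (direct branches), from the address bytes in the buffer (indirect
 * branches), or from a checkpoint register. */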
3014 static const struct reg_arch_type xscale_reg_type = {
3015 .get = xscale_get_reg,
3016 .set = xscale_set_reg,
3019 static void xscale_build_reg_cache(struct target *target)
3021 struct xscale_common *xscale = target_to_xscale(target);
3022 struct arm *armv4_5 = &xscale->armv4_5_common;
3023 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3024 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3026 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3028 (*cache_p) = arm_build_reg_cache(target, armv4_5);
3030 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3031 cache_p = &(*cache_p)->next;
3033 /* fill in values for the xscale reg cache */
3034 (*cache_p)->name = "XScale registers";
3035 (*cache_p)->next = NULL;
3036 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3037 (*cache_p)->num_regs = num_regs;
3039 for (i = 0; i < num_regs; i++)
3041 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3042 (*cache_p)->reg_list[i].value = calloc(4, 1);
3043 (*cache_p)->reg_list[i].dirty = 0;
3044 (*cache_p)->reg_list[i].valid = 0;
3045 (*cache_p)->reg_list[i].size = 32;
3046 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3047 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3048 arch_info[i] = xscale_reg_arch_info[i];
3049 arch_info[i].target = target;
3052 xscale->reg_cache = (*cache_p);
3055 static int xscale_init_target(struct command_context *cmd_ctx,
3056 struct target *target)
3058 xscale_build_reg_cache(target);
3062 static int xscale_init_arch_info(struct target *target,
3063 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
3065 struct arm *armv4_5;
3066 uint32_t high_reset_branch, low_reset_branch;
3069 armv4_5 = &xscale->armv4_5_common;
3071 /* store architecture specific data */
3072 xscale->common_magic = XSCALE_COMMON_MAGIC;
3074 /* we don't really *need* a variant param ... */
3078 if (strcmp(variant, "pxa250") == 0
3079 || strcmp(variant, "pxa255") == 0
3080 || strcmp(variant, "pxa26x") == 0)
3082 else if (strcmp(variant, "pxa27x") == 0
3083 || strcmp(variant, "ixp42x") == 0
3084 || strcmp(variant, "ixp45x") == 0
3085 || strcmp(variant, "ixp46x") == 0)
3087 else if (strcmp(variant, "pxa3xx") == 0)
3090 LOG_WARNING("%s: unrecognized variant %s",
3091 tap->dotted_name, variant);
3093 if (ir_length && ir_length != tap->ir_length) {
3094 LOG_WARNING("%s: IR length for %s is %d; fixing",
3095 tap->dotted_name, variant, ir_length);
3096 tap->ir_length = ir_length;
3100 /* PXA3xx shifts the JTAG instructions */
3101 if (tap->ir_length == 11)
3102 xscale->xscale_variant = XSCALE_PXA3XX;
3104 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3106 /* the debug handler isn't installed (and thus not running) at this time */
3107 xscale->handler_address = 0xfe000800;
3109 /* clear the vectors we keep locally for reference */
3110 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3111 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3113 /* no user-specified vectors have been configured yet */
3114 xscale->static_low_vectors_set = 0x0;
3115 xscale->static_high_vectors_set = 0x0;
3117 /* calculate branches to debug handler */
3118 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3119 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3121 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3122 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3124 for (i = 1; i <= 7; i++)
3126 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3127 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3130 /* 64kB aligned region used for DCache cleaning */
3131 xscale->cache_clean_address = 0xfffe0000;
3133 xscale->hold_rst = 0;
3134 xscale->external_debug_break = 0;
3136 xscale->ibcr_available = 2;
3137 xscale->ibcr0_used = 0;
3138 xscale->ibcr1_used = 0;
3140 xscale->dbr_available = 2;
3141 xscale->dbr0_used = 0;
3142 xscale->dbr1_used = 0;
3144 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
3145 target_name(target));
3147 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3148 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3150 xscale->vector_catch = 0x1;
3152 xscale->trace.data = NULL;
3153 xscale->trace.image = NULL;
3154 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3155 xscale->trace.buffer_fill = 0;
3156 xscale->trace.fill_counter = 0;
3158 /* prepare ARMv4/5 specific information */
3159 armv4_5->arch_info = xscale;
3160 armv4_5->read_core_reg = xscale_read_core_reg;
3161 armv4_5->write_core_reg = xscale_write_core_reg;
3162 armv4_5->full_context = xscale_full_context;
3164 arm_init_arch_info(target, armv4_5);
3166 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3167 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3168 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3169 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3170 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3171 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3172 xscale->armv4_5_mmu.has_tiny_pages = 1;
3173 xscale->armv4_5_mmu.mmu_enabled = 0;
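/* Note: vector entry 0 (reset) in both tables is built as a branch into the
 * debug handler region at handler_address + 0x20, while entries 1..7 are
 * encoded as ARMV4_5_B(0xfffffe, 0), i.e. branch-to-self placeholders,
 * until the user overrides them via 'xscale vector_table'. */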
3178 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3180 struct xscale_common *xscale;
3182 if (sizeof xscale_debug_handler - 1 > 0x800) {
3183 LOG_ERROR("debug_handler.bin: larger than 2kb");
3187 xscale = calloc(1, sizeof(*xscale));
3191 return xscale_init_arch_info(target, xscale, target->tap,
3195 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3197 struct target *target = NULL;
3198 struct xscale_common *xscale;
3200 uint32_t handler_address;
3204 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3208 if ((target = get_target(CMD_ARGV[0])) == NULL)
3210 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3214 xscale = target_to_xscale(target);
3215 retval = xscale_verify_pointer(CMD_CTX, xscale);
3216 if (retval != ERROR_OK)
3219 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3221 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3222 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3224 xscale->handler_address = handler_address;
3228 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3235 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3237 struct target *target = NULL;
3238 struct xscale_common *xscale;
3240 uint32_t cache_clean_address;
3244 return ERROR_COMMAND_SYNTAX_ERROR;
3247 target = get_target(CMD_ARGV[0]);
3250 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3253 xscale = target_to_xscale(target);
3254 retval = xscale_verify_pointer(CMD_CTX, xscale);
3255 if (retval != ERROR_OK)
3258 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3260 if (cache_clean_address & 0xffff)
3262 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3266 xscale->cache_clean_address = cache_clean_address;
3272 COMMAND_HANDLER(xscale_handle_cache_info_command)
3274 struct target *target = get_current_target(CMD_CTX);
3275 struct xscale_common *xscale = target_to_xscale(target);
3278 retval = xscale_verify_pointer(CMD_CTX, xscale);
3279 if (retval != ERROR_OK)
3282 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3285 static int xscale_virt2phys(struct target *target,
3286 uint32_t virtual, uint32_t *physical)
3288 struct xscale_common *xscale = target_to_xscale(target);
3291 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3292 LOG_ERROR(xscale_not);
3293 return ERROR_TARGET_INVALID;
3297 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3298 virtual, &cb, &ret);
3299 if (retval != ERROR_OK)
3305 static int xscale_mmu(struct target *target, int *enabled)
3307 struct xscale_common *xscale = target_to_xscale(target);
3309 if (target->state != TARGET_HALTED)
3311 LOG_ERROR("Target not halted");
3312 return ERROR_TARGET_INVALID;
3314 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3318 COMMAND_HANDLER(xscale_handle_mmu_command)
3320 struct target *target = get_current_target(CMD_CTX);
3321 struct xscale_common *xscale = target_to_xscale(target);
3324 retval = xscale_verify_pointer(CMD_CTX, xscale);
3325 if (retval != ERROR_OK)
3328 if (target->state != TARGET_HALTED)
3330 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3337 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3339 xscale_enable_mmu_caches(target, 1, 0, 0);
3341 xscale_disable_mmu_caches(target, 1, 0, 0);
3342 xscale->armv4_5_mmu.mmu_enabled = enable;
3345 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3350 COMMAND_HANDLER(xscale_handle_idcache_command)
3352 struct target *target = get_current_target(CMD_CTX);
3353 struct xscale_common *xscale = target_to_xscale(target);
3355 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3356 if (retval != ERROR_OK)
3359 if (target->state != TARGET_HALTED)
3361 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3365 bool icache = false;
3366 if (strcmp(CMD_NAME, "icache") == 0)
3371 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3373 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3375 xscale_enable_mmu_caches(target, 0, 0, 1);
3377 xscale_disable_mmu_caches(target, 0, 0, 1);
3379 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3381 xscale_enable_mmu_caches(target, 0, 1, 0);
3383 xscale_disable_mmu_caches(target, 0, 1, 0);
3387 bool enabled = icache ?
3388 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3389 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3390 const char *msg = enabled ? "enabled" : "disabled";
3391 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3396 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3398 struct target *target = get_current_target(CMD_CTX);
3399 struct xscale_common *xscale = target_to_xscale(target);
3402 retval = xscale_verify_pointer(CMD_CTX, xscale);
3403 if (retval != ERROR_OK)
3408 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3412 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3413 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3414 xscale_write_dcsr(target, -1, -1);
3417 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3423 COMMAND_HANDLER(xscale_handle_vector_table_command)
3425 struct target *target = get_current_target(CMD_CTX);
3426 struct xscale_common *xscale = target_to_xscale(target);
3430 retval = xscale_verify_pointer(CMD_CTX, xscale);
3431 if (retval != ERROR_OK)
3434 if (CMD_ARGC == 0) /* print current settings */
3438 command_print(CMD_CTX, "active user-set static vectors:");
3439 for (idx = 1; idx < 8; idx++)
3440 if (xscale->static_low_vectors_set & (1 << idx))
3441 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3442 for (idx = 1; idx < 8; idx++)
3443 if (xscale->static_high_vectors_set & (1 << idx))
3444 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3453 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3455 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3457 if (idx < 1 || idx >= 8)
3460 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3462 xscale->static_low_vectors_set |= (1<<idx);
3463 xscale->static_low_vectors[idx] = vec;
3465 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3467 xscale->static_high_vectors_set |= (1<<idx);
3468 xscale->static_high_vectors[idx] = vec;
3475 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3481 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3483 struct target *target = get_current_target(CMD_CTX);
3484 struct xscale_common *xscale = target_to_xscale(target);
3485 uint32_t dcsr_value;
3488 retval = xscale_verify_pointer(CMD_CTX, xscale);
3489 if (retval != ERROR_OK)
3492 if (target->state != TARGET_HALTED)
3494 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3500 if (strcmp("enable", CMD_ARGV[0]) == 0)
3501 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3502 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3503 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3505 return ERROR_INVALID_ARGUMENTS;
3508 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
3510 if (strcmp("fill", CMD_ARGV[1]) == 0)
3512 int buffcount = 1; /* default */
3514 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3515 if (buffcount < 1) /* invalid */
3517 command_print(CMD_CTX, "fill buffer count must be > 0");
3518 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3519 return ERROR_INVALID_ARGUMENTS;
3521 xscale->trace.buffer_fill = buffcount;
3522 xscale->trace.mode = XSCALE_TRACE_FILL;
3524 else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3525 xscale->trace.mode = XSCALE_TRACE_WRAP;
3528 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3529 return ERROR_INVALID_ARGUMENTS;
3533 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
3535 char fill_string[12];
3536 sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
3537 command_print(CMD_CTX, "trace buffer enabled (%s)",
3538 (xscale->trace.mode == XSCALE_TRACE_FILL)
3539 ? fill_string : "wrap");
3542 command_print(CMD_CTX, "trace buffer disabled");
3544 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3545 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3546 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3548 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
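/* Example usage, matching the .usage string registered below:
 *   xscale trace_buffer enable fill 2   -- capture two buffers, then stop
 *   xscale trace_buffer enable wrap     -- wrap continuously (the default)
 *   xscale trace_buffer disable
 * Fill mode sets the low DCSR mode bits to 2; wrap and disable clear them
 * (see xscale_write_dcsr_sw() above). */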
3553 COMMAND_HANDLER(xscale_handle_trace_image_command)
3555 struct target *target = get_current_target(CMD_CTX);
3556 struct xscale_common *xscale = target_to_xscale(target);
3561 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3565 retval = xscale_verify_pointer(CMD_CTX, xscale);
3566 if (retval != ERROR_OK)
3569 if (xscale->trace.image)
3571 image_close(xscale->trace.image);
3572 free(xscale->trace.image);
3573 command_print(CMD_CTX, "previously loaded image found and closed");
3576 xscale->trace.image = malloc(sizeof(struct image));
3577 xscale->trace.image->base_address_set = 0;
3578 xscale->trace.image->start_address_set = 0;
3580 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3583 xscale->trace.image->base_address_set = 1;
3584 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3588 xscale->trace.image->base_address_set = 0;
3591 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3593 free(xscale->trace.image);
3594 xscale->trace.image = NULL;
3601 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3603 struct target *target = get_current_target(CMD_CTX);
3604 struct xscale_common *xscale = target_to_xscale(target);
3605 struct xscale_trace_data *trace_data;
3609 retval = xscale_verify_pointer(CMD_CTX, xscale);
3610 if (retval != ERROR_OK)
3613 if (target->state != TARGET_HALTED)
3615 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3621 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3625 trace_data = xscale->trace.data;
3629 command_print(CMD_CTX, "no trace data collected");
3633 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3642 fileio_write_u32(&file, trace_data->chkpt0);
3643 fileio_write_u32(&file, trace_data->chkpt1);
3644 fileio_write_u32(&file, trace_data->last_instruction);
3645 fileio_write_u32(&file, trace_data->depth);
3647 for (i = 0; i < trace_data->depth; i++)
3648 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3650 trace_data = trace_data->next;
3653 fileio_close(&file);
3658 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3660 struct target *target = get_current_target(CMD_CTX);
3661 struct xscale_common *xscale = target_to_xscale(target);
3664 retval = xscale_verify_pointer(CMD_CTX, xscale);
3665 if (retval != ERROR_OK)
3668 xscale_analyze_trace(target, CMD_CTX);
3673 COMMAND_HANDLER(xscale_handle_cp15)
3675 struct target *target = get_current_target(CMD_CTX);
3676 struct xscale_common *xscale = target_to_xscale(target);
3679 retval = xscale_verify_pointer(CMD_CTX, xscale);
3680 if (retval != ERROR_OK)
3683 if (target->state != TARGET_HALTED)
3685 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3688 uint32_t reg_no = 0;
3689 struct reg *reg = NULL;
3692 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3693 /* translate from xscale cp15 register no to openocd register */
3697 reg_no = XSCALE_MAINID;
3700 reg_no = XSCALE_CTRL;
3703 reg_no = XSCALE_TTB;
3706 reg_no = XSCALE_DAC;
3709 reg_no = XSCALE_FSR;
3712 reg_no = XSCALE_FAR;
3715 reg_no = XSCALE_PID;
3718 reg_no = XSCALE_CPACCESS;
3721 command_print(CMD_CTX, "invalid register number");
3722 return ERROR_INVALID_ARGUMENTS;
3724 reg = &xscale->reg_cache->reg_list[reg_no];
3731 /* read cp15 control register */
3732 xscale_get_reg(reg);
3733 value = buf_get_u32(reg->value, 0, 32);
3734 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3736 else if (CMD_ARGC == 2)
3739 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3741 /* send CP write request (command 0x41) */
3742 xscale_send_u32(target, 0x41);
3744 /* send CP register number */
3745 xscale_send_u32(target, reg_no);
3747 /* send CP register value */
3748 xscale_send_u32(target, value);
3750 /* execute cpwait to ensure outstanding operations complete */
3751 xscale_send_u32(target, 0x53);
3755 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3761 static const struct command_registration xscale_exec_command_handlers[] = {
3763 .name = "cache_info",
3764 .handler = xscale_handle_cache_info_command,
3765 .mode = COMMAND_EXEC,
3766 .help = "display information about CPU caches",
3770 .handler = xscale_handle_mmu_command,
3771 .mode = COMMAND_EXEC,
3772 .help = "enable or disable the MMU",
3773 .usage = "['enable'|'disable']",
3777 .handler = xscale_handle_idcache_command,
3778 .mode = COMMAND_EXEC,
3779 .help = "display ICache state, optionally enabling or "
3781 .usage = "['enable'|'disable']",
3785 .handler = xscale_handle_idcache_command,
3786 .mode = COMMAND_EXEC,
3787 .help = "display DCache state, optionally enabling or "
3789 .usage = "['enable'|'disable']",
3792 .name = "vector_catch",
3793 .handler = xscale_handle_vector_catch_command,
3794 .mode = COMMAND_EXEC,
3795 .help = "set or display 8-bit mask of vectors "
3796 "that should trigger debug entry",
3800 .name = "vector_table",
3801 .handler = xscale_handle_vector_table_command,
3802 .mode = COMMAND_EXEC,
3803 .help = "set vector table entry in mini-ICache, "
3804 "or display current tables",
3805 .usage = "[('high'|'low') index code]",
3808 .name = "trace_buffer",
3809 .handler = xscale_handle_trace_buffer_command,
3810 .mode = COMMAND_EXEC,
3811 .help = "display trace buffer status, enable or disable "
3812 "tracing, and optionally reconfigure trace mode",
3813 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3816 .name = "dump_trace",
3817 .handler = xscale_handle_dump_trace_command,
3818 .mode = COMMAND_EXEC,
3819 .help = "dump content of trace buffer to file",
3820 .usage = "filename",
3823 .name = "analyze_trace",
3824 .handler = xscale_handle_analyze_trace_buffer_command,
3825 .mode = COMMAND_EXEC,
3826 .help = "analyze content of trace buffer",
3830 .name = "trace_image",
3831 .handler = xscale_handle_trace_image_command,
3832 .mode = COMMAND_EXEC,
3833 .help = "load image from file to address (default 0)",
3834 .usage = "filename [offset [filetype]]",
3838 .handler = xscale_handle_cp15,
3839 .mode = COMMAND_EXEC,
3840 .help = "Read or write coprocessor 15 register.",
3841 .usage = "register [value]",
3843 COMMAND_REGISTRATION_DONE
3845 static const struct command_registration xscale_any_command_handlers[] = {
3847 .name = "debug_handler",
3848 .handler = xscale_handle_debug_handler_command,
3849 .mode = COMMAND_ANY,
3850 .help = "Change address used for debug handler.",
3851 .usage = "target address",
3854 .name = "cache_clean_address",
3855 .handler = xscale_handle_cache_clean_address_command,
3856 .mode = COMMAND_ANY,
3857 .help = "Change address used for cleaning data cache.",
3861 .chain = xscale_exec_command_handlers,
3863 COMMAND_REGISTRATION_DONE
3865 static const struct command_registration xscale_command_handlers[] = {
3867 .chain = arm_command_handlers,
3871 .mode = COMMAND_ANY,
3872 .help = "xscale command group",
3873 .chain = xscale_any_command_handlers,
3875 COMMAND_REGISTRATION_DONE
3878 struct target_type xscale_target =
3882 .poll = xscale_poll,
3883 .arch_state = xscale_arch_state,
3885 .target_request_data = NULL,
3887 .halt = xscale_halt,
3888 .resume = xscale_resume,
3889 .step = xscale_step,
3891 .assert_reset = xscale_assert_reset,
3892 .deassert_reset = xscale_deassert_reset,
3893 .soft_reset_halt = NULL,
3895 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3896 .get_gdb_reg_list = arm_get_gdb_reg_list,
3898 .read_memory = xscale_read_memory,
3899 .read_phys_memory = xscale_read_phys_memory,
3900 .write_memory = xscale_write_memory,
3901 .write_phys_memory = xscale_write_phys_memory,
3902 .bulk_write_memory = xscale_bulk_write_memory,
3904 .checksum_memory = arm_checksum_memory,
3905 .blank_check_memory = arm_blank_check_memory,
3907 .run_algorithm = armv4_5_run_algorithm,
3909 .add_breakpoint = xscale_add_breakpoint,
3910 .remove_breakpoint = xscale_remove_breakpoint,
3911 .add_watchpoint = xscale_add_watchpoint,
3912 .remove_watchpoint = xscale_remove_watchpoint,
3914 .commands = xscale_command_handlers,
3915 .target_create = xscale_target_create,
3916 .init_target = xscale_init_target,
3918 .virt2phys = xscale_virt2phys,