target: with pointers, use NULL instead of 0
[openocd.git] / src / target / xtensa / xtensa.c
blob63ffefce708099d1efafd725260bb50f21ffee67
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
/* Swap 4-bit Xtensa opcodes and fields */

/* Reverse the order of the two nibbles in a byte: 0xAB -> 0xBA. */
#define XT_NIBSWAP8(V) \
	((((V) & 0x0F) << 4) \
		| (((V) & 0xF0) >> 4))

/* Reverse the order of the four nibbles in a 16-bit value. */
#define XT_NIBSWAP16(V) \
	((((V) & 0x000F) << 12) \
		| (((V) & 0x00F0) << 4) \
		| (((V) & 0x0F00) >> 4) \
		| (((V) & 0xF000) >> 12))

/* Reverse the order of the six nibbles in a 24-bit value (one Xtensa opcode). */
#define XT_NIBSWAP24(V) \
	((((V) & 0x00000F) << 20) \
		| (((V) & 0x0000F0) << 12) \
		| (((V) & 0x000F00) << 4) \
		| (((V) & 0x00F000) >> 4) \
		| (((V) & 0x0F0000) >> 12) \
		| (((V) & 0xF00000) >> 20))
/* _XT_INS_FORMAT_*()
 * Instruction formatting converted from little-endian inputs
 * and shifted to the MSB-side of DIR for BE systems.
 */

/* RSR-format instruction: OPCODE with an 8-bit special-register number and a 4-bit T field. */
#define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((SR) & 0xFF) << 8)) << 8 \
		: (OPCODE) \
		| (((SR) & 0xFF) << 8) \
		| (((T) & 0x0F) << 4))

/* RRR-format instruction: OPCODE with an 8-bit ST field and a 4-bit R field. */
#define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		: (OPCODE) \
		| (((ST) & 0xFF) << 4) \
		| (((R) & 0x0F) << 12))

/* RRRN-format (narrow, 16-bit) instruction: OPCODE with S, T and a 4-bit immediate. */
#define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
	(XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
			| (((T) & 0x0F) << 8) \
			| (((S) & 0x0F) << 4) \
			| ((IMM4) & 0x0F)) << 16 \
		: (OPCODE) \
		| (((T) & 0x0F) << 4) \
		| (((S) & 0x0F) << 8) \
		| (((IMM4) & 0x0F) << 12))

/* RRI8-format instruction: OPCODE with R, S, T fields and an 8-bit immediate. */
#define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8) \
			| ((IMM8) & 0xFF)) << 8 \
		: (OPCODE) \
		| (((IMM8) & 0xFF) << 16) \
		| (((R) & 0x0F) << 12) \
		| (((S) & 0x0F) << 8) \
		| (((T) & 0x0F) << 4))

/* RRI4-format instruction: OPCODE with R, S, T fields and a 4-bit immediate. */
#define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		| ((IMM4) & 0x0F) \
		: (OPCODE) \
		| (((IMM4) & 0x0F) << 20) \
		| (((R) & 0x0F) << 12) \
		| (((S) & 0x0F) << 8) \
		| (((T) & 0x0F) << 4))
/* Xtensa processor instruction opcodes
 */
/* "Return From Debug Operation" to Normal */
#define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
/* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
#define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)

/* Load to DDR register, increase addr register */
#define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
/* Store from DDR register, increase addr register */
#define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))

/* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
#define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
/* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
#define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
/* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
#define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)

/* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
#define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
/* Store 16-bit to A(S)+2*IMM8 from A(T) */
#define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
/* Store 8-bit to A(S)+IMM8 from A(T) */
#define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)

/* Cache Instructions */
#define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
#define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
#define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
#define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)

/* Control Instructions */
#define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
#define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))

/* Read Special Register */
#define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
/* Write Special Register */
#define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
/* Swap Special Register */
#define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)

/* Rotate Window by (-8..7) */
#define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))

/* Read User Register */
#define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
/* Write User Register */
#define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)

/* Read Floating-Point Register */
#define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
/* Write Floating-Point Register */
#define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)

/* Load/store 32-bit to/from the register window's exception save area */
#define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
#define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
#define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)

/* "Return From Window Overflow/Underflow" opcodes and their match mask */
#define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
#define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
#define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
#define XT_WATCHPOINTS_NUM_MAX 2

/* Special register number macro for DDR, PS, WB, A3, A4 registers.
 * These get used a lot so making a shortcut is useful.
 */
#define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
#define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
#define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
#define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
#define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
#define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)

/* Special-register numbering bases used when computing PC/PS access for the
 * configured debug interrupt level. */
#define XT_PS_REG_NUM (0xe6U)
#define XT_EPS_REG_NUM_BASE (0xc0U)	/* (EPS2 - 2), for adding DBGLEVEL */
#define XT_EPC_REG_NUM_BASE (0xb0U)	/* (EPC1 - 1), for adding DBGLEVEL */
#define XT_PC_REG_NUM_VIRTUAL (0xffU)	/* Marker for computing PC (EPC[DBGLEVEL) */
#define XT_PC_DBREG_NUM_BASE (0x20U)	/* External (i.e., GDB) access */
#define XT_NX_IBREAKC_BASE (0xc0U)	/* (IBREAKC0..IBREAKC1) for NX */

/* Breakpoint resource limits */
#define XT_SW_BREAKPOINTS_MAX_NUM 32
#define XT_HW_IBREAK_MAX_NUM 2
#define XT_HW_DBREAK_MAX_NUM 2
/* Fixed portion of the Xtensa register cache: PC, the physical AR file,
 * core special registers, and the windowed A0..A15 aliases.
 * Indexed by enum xtensa_reg_id (XT_REG_IDX_*). */
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
	XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
	/* Physical address registers AR0..AR63 */
	XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
	/* Core special and debug registers */
	XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0),	/* PS (not mapped through EPS[]) */
	XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
	XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),

	/* WARNING: For these registers, regnum points to the
	 * index of the corresponding ARx registers, NOT to
	 * the processor register number! */
	XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
};
/**
 * Types of memory used at xtensa target
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,
	XTENSA_MEM_REG_IRAM,
	XTENSA_MEM_REG_DROM,
	XTENSA_MEM_REG_DRAM,
	XTENSA_MEM_REG_SRAM,
	XTENSA_MEM_REG_SROM,
	XTENSA_MEM_REGS_NUM	/* number of entries, not a real region type */
};
/* Register definition as union for list allocation.
 * Allows the same storage to be viewed as a value or as a raw byte buffer. */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;
	uint8_t buf[4];
};
/* Mapping of "Exx" status strings (reported by target queries) to OpenOCD error codes. */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};
/* Set to true for extra debug logging (zero-initialized, i.e. false, by default) */
static const bool xtensa_extra_debug_log;
317 * Gets a config for the specific mem type
319 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
320 struct xtensa *xtensa,
321 enum xtensa_mem_region_type type)
323 switch (type) {
324 case XTENSA_MEM_REG_IROM:
325 return &xtensa->core_config->irom;
326 case XTENSA_MEM_REG_IRAM:
327 return &xtensa->core_config->iram;
328 case XTENSA_MEM_REG_DROM:
329 return &xtensa->core_config->drom;
330 case XTENSA_MEM_REG_DRAM:
331 return &xtensa->core_config->dram;
332 case XTENSA_MEM_REG_SRAM:
333 return &xtensa->core_config->sram;
334 case XTENSA_MEM_REG_SROM:
335 return &xtensa->core_config->srom;
336 default:
337 return NULL;
342 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
343 * for a given address
344 * Returns NULL if nothing found
346 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
347 const struct xtensa_local_mem_config *mem,
348 target_addr_t address)
350 for (unsigned int i = 0; i < mem->count; i++) {
351 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
352 if (address >= region->base && address < (region->base + region->size))
353 return region;
355 return NULL;
359 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
360 * for a given address
361 * Returns NULL if nothing found
363 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
364 struct xtensa *xtensa,
365 target_addr_t address)
367 const struct xtensa_local_mem_region_config *result;
368 const struct xtensa_local_mem_config *mcgf;
369 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
370 mcgf = xtensa_get_mem_config(xtensa, mtype);
371 result = xtensa_memory_region_find(mcgf, address);
372 if (result)
373 return result;
375 return NULL;
378 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
379 const struct xtensa_local_mem_config *mem,
380 target_addr_t address)
382 if (!cache->size)
383 return false;
384 return xtensa_memory_region_find(mem, address);
387 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
389 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
390 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
391 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
392 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
395 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
397 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
398 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
399 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
400 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
403 static int xtensa_core_reg_get(struct reg *reg)
405 /* We don't need this because we read all registers on halt anyway. */
406 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
407 struct target *target = xtensa->target;
409 if (target->state != TARGET_HALTED)
410 return ERROR_TARGET_NOT_HALTED;
411 if (!reg->exist) {
412 if (strncmp(reg->name, "?0x", 3) == 0) {
413 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
414 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
415 return ERROR_OK;
417 return ERROR_COMMAND_ARGUMENT_INVALID;
419 return ERROR_OK;
/* reg_arch_type .set handler: copy the new value into the cache and mark the
 * register dirty; the actual write-back to hardware happens later in
 * xtensa_write_dirty_registers(). Unknown "?0xNNNN" placeholders are ignored. */
static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
{
	struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
	struct target *target = xtensa->target;

	assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	if (!reg->exist) {
		if (strncmp(reg->name, "?0x", 3) == 0) {
			unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
			LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
			return ERROR_OK;
		}
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	buf_cpy(buf, reg->value, reg->size);

	if (xtensa->core_config->windowed) {
		/* If the user updates a potential scratch register, track for conflicts */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
			if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
				LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
					buf_get_u32(reg->value, 0, 32));
				LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
					xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
					xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
				/* Flag the conflict so write-back can arbitrate Ax vs ARx later */
				xtensa->scratch_ars[s].intval = true;
				break;
			}
		}
	}
	reg->dirty = true;
	reg->valid = true;

	return ERROR_OK;
}
/* Register access callbacks installed on every entry of the Xtensa register cache. */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,
	.set = xtensa_core_reg_set,
};
/* Convert a register index that's indexed relative to windowbase, to the real address. */
static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	unsigned int idx;
	if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
		idx = reg_idx - XT_REG_IDX_AR0;
	} else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
		idx = reg_idx - XT_REG_IDX_A0;
	} else {
		LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
		return -1;
	}
	/* Each windowbase value represents 4 registers on LX and 8 on NX */
	int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
	/* Modular rotation within the physical AR file; aregs_num is a power of two,
	 * so the AND acts as a wraparound mask. */
	return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
}
/* Inverse of xtensa_windowbase_offset_to_canonical(): rotate by -windowbase. */
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
}
493 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
495 struct reg *reg_list = xtensa->core_cache->reg_list;
496 reg_list[reg_idx].dirty = true;
/* Queue a single 24-bit instruction into DIR0EXEC for execution on the halted core. */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
/* Queue a wide (multi-word) instruction for execution via the DIRx registers.
 * Out-of-range lengths (0 or > 64 bytes) are silently ignored. */
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
{
	const int max_oplen = 64;	/* 8 DIRx regs: max width 64B */
	if ((oplen > 0) && (oplen <= max_oplen)) {
		uint8_t ops_padded[max_oplen];
		memcpy(ops_padded, ops, oplen);
		/* Zero-pad up to a whole number of 32-bit words */
		memset(ops_padded + oplen, 0, max_oplen - oplen);
		unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
		/* Fill the upper DIRx words first... */
		for (int32_t i = oplenw - 1; i > 0; i--)
			xtensa_queue_dbg_reg_write(xtensa,
				XDMREG_DIR0 + i,
				target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
		/* Write DIR0EXEC last */
		xtensa_queue_dbg_reg_write(xtensa,
			XDMREG_DIR0EXEC,
			target_buffer_get_u32(xtensa->target, &ops_padded[0]));
	}
}
523 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
525 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
526 return dm->pwr_ops->queue_reg_write(dm, reg, data);
/* NOTE: Assumes A3 has already been saved */
/* Read PS (LX) or WB (NX) into *woe, then clear its window-overflow-exception
 * bit so subsequent AR accesses cannot trigger overflow exceptions.
 * No-op (returns ERROR_OK, *woe untouched) on non-windowed configs. */
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	uint32_t woe_dis;
	uint8_t woe_buf[4];

	if (xtensa->core_config->windowed) {
		/* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, woe_sr, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
		int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
				(woe_sr == XT_SR_PS) ? "PS" : "WB", res);
			return res;
		}
		xtensa_core_status_check(target);
		*woe = buf_get_u32(woe_buf, 0, 32);
		/* Mask out PS.WOE (LX) or WB.S (NX) and write the value back */
		woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
		LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
	}
	return ERROR_OK;
}
/* NOTE: Assumes A3 has already been saved */
/* Write a previously saved PS (LX) or WB (NX) value back, re-enabling window
 * overflow exceptions. No-op on non-windowed configs. Operations are queued
 * only; the caller executes the queue. */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
	}
}
575 static bool xtensa_reg_is_readable(int flags, int cpenable)
577 if (flags & XT_REGF_NOREAD)
578 return false;
579 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
580 return false;
581 return true;
/* Resolve a dirty-value conflict between a windowed Ax register (index i) and its
 * physical ARx counterpart (index j): the side the debugger wrote as scratch
 * (tracked via scratch_ars[].intval) wins and is copied over the other.
 * Returns true (caller should warn) only when both sides were scratch-written. */
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
{
	int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
	if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
		/* Only Ax was set as scratch: propagate Ax -> ARx */
		LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
		memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
	} else {
		/* Otherwise ARx takes priority: propagate ARx -> Ax */
		LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
		memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
	}
	return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
}
/* Write every dirty register in the cache back to the processor.
 * Ordering is significant and must not be changed:
 *   1. SFR/user/FP registers (CPENABLE deferred, MS deferred on NX);
 *   2. CPENABLE (so coprocessor registers written above take effect);
 *   3. save A3 (used as the scratch register for all WSR/WUR/WFR sequences);
 *   4. A0..A15, then the full AR file by rotating the window;
 *   5. deferred MS, then restore A3.
 * All operations are queued on the debug module and executed in batches. */
static int xtensa_write_dirty_registers(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	xtensa_reg_val_t regval, windowbase = 0;
	bool scratch_reg_dirty = false, delay_cpenable = false;
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	bool preserve_a3 = false;
	uint8_t a3_buf[4];
	xtensa_reg_val_t a3 = 0, woe;
	/* On NX, index of the MS register; otherwise an out-of-range sentinel */
	unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
		xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
	xtensa_reg_val_t ms;
	bool restore_ms = false;

	LOG_TARGET_DEBUG(target, "start");

	/* We need to write the dirty registers in the cache list back to the processor.
	 * Start by writing the SFR/user registers. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Entries beyond XT_NUM_REGS live in the per-config optional register list */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (reg_list[i].dirty) {
			if (rlist[ridx].type == XT_REG_SPECIAL ||
					rlist[ridx].type == XT_REG_USER ||
					rlist[ridx].type == XT_REG_FR) {
				scratch_reg_dirty = true;
				if (i == XT_REG_IDX_CPENABLE) {
					/* Write CPENABLE after the loop, once all other SFRs are queued */
					delay_cpenable = true;
					continue;
				}
				regval = xtensa_reg_get(target, i);
				LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
					reg_list[i].name,
					rlist[ridx].reg_num,
					regval);
				/* Stage the value in DDR, pull it into A3, then move A3 to the target reg */
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				if (reg_list[i].exist) {
					unsigned int reg_num = rlist[ridx].reg_num;
					if (rlist[ridx].type == XT_REG_USER) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
					} else if (rlist[ridx].type == XT_REG_FR) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
					} else {/*SFR */
						if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
							if (xtensa->core_config->core_type == XT_LX) {
								/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
								reg_num = (XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
								xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
							} else {
								/* NX PC set through issuing a jump instruction */
								xtensa_queue_exec_ins(xtensa, XT_INS_JX(xtensa, XT_REG_A3));
							}
						} else if (i == ms_idx) {
							/* MS must be restored after ARs. This ensures ARs remain in correct
							 * order even for reversed register groups (overflow/underflow).
							 */
							ms = regval;
							restore_ms = true;
							LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
						} else {
							xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
						}
					}
				}
				reg_list[i].dirty = false;
			}
		}
	}
	if (scratch_reg_dirty)
		/* A3 was clobbered as scratch above; make sure it gets written back too */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (delay_cpenable) {
		regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
		LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
			XT_REG_A3));
		reg_list[XT_REG_IDX_CPENABLE].dirty = false;
	}

	preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
	if (preserve_a3) {
		/* Save (windowed) A3 for scratch use */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			return res;
		xtensa_core_status_check(target);
		a3 = buf_get_u32(a3_buf, 0, 32);
	}

	if (xtensa->core_config->windowed) {
		res = xtensa_window_state_save(target, &woe);
		if (res != ERROR_OK)
			return res;
		/* Grab the windowbase, we need it. */
		uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
			XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
		windowbase = xtensa_reg_get(target, wb_idx);
		if (xtensa->core_config->core_type == XT_NX)
			windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;

		/* Check if there are mismatches between the ARx and corresponding Ax registers.
		 * When the user sets a register on a windowed config, xt-gdb may set the ARx
		 * register directly. Thus we take ARx as priority over Ax if both are dirty
		 * and it's unclear if the user set one over the other explicitly.
		 */
		for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
			unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
			if (reg_list[i].dirty && reg_list[j].dirty) {
				if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
					bool show_warning = true;
					if (i == XT_REG_IDX_A3)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
					else if (i == XT_REG_IDX_A4)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
					if (show_warning)
						LOG_WARNING(
							"Warning: Both A%d [0x%08" PRIx32
							"] as well as its underlying physical register "
							"(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
							i - XT_REG_IDX_A0,
							buf_get_u32(reg_list[i].value, 0, 32),
							j - XT_REG_IDX_AR0,
							buf_get_u32(reg_list[j].value, 0, 32));
				}
			}
		}
	}

	/* Write A0-A16. */
	for (unsigned int i = 0; i < 16; i++) {
		if (reg_list[XT_REG_IDX_A0 + i].dirty) {
			regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
			LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
				xtensa_regs[XT_REG_IDX_A0 + i].name,
				regval,
				xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
			reg_list[XT_REG_IDX_A0 + i].dirty = false;
			if (i == 3) {
				/* Avoid stomping A3 during restore at end of function */
				a3 = regval;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* Now write AR registers */
		for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
			/* Write the 16 registers we can see */
			for (unsigned int i = 0; i < 16; i++) {
				if (i + j < xtensa->core_config->aregs_num) {
					enum xtensa_reg_id realadr =
						xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
						windowbase);
					/* Write back any dirty un-windowed registers */
					if (reg_list[realadr].dirty) {
						regval = xtensa_reg_get(target, realadr);
						LOG_TARGET_DEBUG(
							target,
							"Writing back reg %s value %08" PRIX32 ", num =%i",
							xtensa_regs[realadr].name,
							regval,
							xtensa_regs[realadr].reg_num);
						xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
						xtensa_queue_exec_ins(xtensa,
							XT_INS_RSR(xtensa, XT_SR_DDR,
								xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
						reg_list[realadr].dirty = false;
						if ((i + j) == 3)
							/* Avoid stomping AR during A3 restore at end of function */
							a3 = regval;
					}
				}
			}
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were.
			 * Each ROTW rotates 4 registers on LX and 8 on NX */
			int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
		}

		xtensa_window_state_restore(target, woe);

		/* Conflict-tracking flags are only valid for one write-back cycle */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	if (restore_ms) {
		uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, ms);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
	}

	if (preserve_a3) {
		/* Put the saved A3 value back */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	xtensa_core_status_check(target);

	return res;
}
814 static inline bool xtensa_is_stopped(struct target *target)
816 struct xtensa *xtensa = target_to_xtensa(target);
817 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
/* Probe and initialize the Xtensa debug module.
 *
 * Wakes the debug, memory and core power domains, claims the debug module
 * for JTAG use, enables it, and verifies the module is online (valid OCD_ID).
 * On success the target is flagged examined and the cached smp_break
 * configuration is (re)applied to hardware.
 *
 * Returns ERROR_OK; ERROR_FAIL when no core type was configured;
 * ERROR_TARGET_FAILURE when the debug module does not respond; or a
 * queue-execution error code.
 */
int xtensa_examine(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* Power up the debug, memory and core domains */
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	LOG_DEBUG("coreid = %d", target->coreid);

	if (xtensa->core_config->core_type == XT_UNDEF) {
		LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
		return ERROR_FAIL;
	}

	/* Wake the domains first, then additionally claim debug use for JTAG */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_enable(&xtensa->dbg_mod);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
		LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
	target_set_examined(target);
	/* Re-apply any smpbreak routing that was configured before examine */
	xtensa_smpbreak_write(xtensa, xtensa->smp_break);
	return ERROR_OK;
}
/* Wake the target's power domains (debug, memory, core).
 *
 * If reset is currently asserted, CORERESET is kept set so waking the
 * domains does not release the core from reset.  Returns the result of
 * executing the queued power-register writes.
 */
int xtensa_wakeup(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	if (xtensa->reset_asserted)
		cmd |= PWRCTL_CORERESET(xtensa);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	/* TODO: can we join this with the write above? */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	return xtensa_dm_queue_execute(&xtensa->dbg_mod);
}
/* Program the cross-core break/stall routing ("smpbreak") bits in DCR.
 *
 * 'set' selects which of BreakIn/BreakOut/RunStallIn/DebugModeOut to
 * enable; the XOR below computes the complement within that bit group so
 * every routing bit NOT requested is explicitly cleared via DCRCLR, while
 * ENABLEOCD is always set and never lands in the clear mask.
 */
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
{
	/* NOTE(review): 0x00110000 is written to DSR below, presumably to
	 * acknowledge/clear latched break status bits — confirm against the
	 * OCDDSR bit definitions. */
	uint32_t dsr_data = 0x00110000;
	uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
		(OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
		OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);

	LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	return xtensa_dm_queue_execute(&xtensa->dbg_mod);
}
878 int xtensa_smpbreak_set(struct target *target, uint32_t set)
880 struct xtensa *xtensa = target_to_xtensa(target);
881 int res = ERROR_OK;
883 xtensa->smp_break = set;
884 if (target_was_examined(target))
885 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
886 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
887 return res;
/* Read the current smpbreak routing configuration back from hardware
 * (DCRSET reflects the DCR contents).
 *
 * Note: *val is decoded from the read buffer unconditionally; callers
 * should only trust it when the returned result is ERROR_OK.
 */
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
{
	uint8_t dcr_buf[sizeof(uint32_t)];

	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	*val = buf_get_u32(dcr_buf, 0, 32);

	return res;
}
902 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
904 struct xtensa *xtensa = target_to_xtensa(target);
905 *val = xtensa->smp_break;
906 return ERROR_OK;
909 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
911 return buf_get_u32(reg->value, 0, 32);
914 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
916 buf_set_u32(reg->value, 0, 32, value);
917 reg->dirty = true;
920 static int xtensa_imprecise_exception_occurred(struct target *target)
922 struct xtensa *xtensa = target_to_xtensa(target);
923 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
924 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
925 if (xtensa->nx_reg_idx[idx]) {
926 xtensa_reg_val_t reg = xtensa_reg_get(target, xtensa->nx_reg_idx[idx]);
927 if (reg & XT_IMPR_EXC_MSK) {
928 LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
929 xtensa->core_cache->reg_list[ridx].name, reg);
930 return true;
934 return false;
/* Clear latched imprecise-exception state on an NX core.
 *
 * Walks the optional NX exception registers (IEVEC..MESRCLR) present in
 * this configuration.  MESR itself is skipped; MESRCLR is written with
 * XT_MESRCLR_IMPR_EXC_MSK to clear the flags, all other registers are
 * zeroed.
 */
static void xtensa_imprecise_exception_clear(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
		enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
		/* ridx == 0 means the optional register is absent in this config */
		if (ridx && idx != XT_NX_REG_IDX_MESR) {
			xtensa_reg_val_t value = (idx == XT_NX_REG_IDX_MESRCLR) ? XT_MESRCLR_IMPR_EXC_MSK : 0;
			xtensa_reg_set(target, ridx, value);
			LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
				xtensa->core_cache->reg_list[ridx].name, value);
		}
	}
}
/* Read DSR and report/clear error conditions left by debug-instruction
 * (DIR) activity.
 *
 * Checks EXECBUSY (target still busy), EXECEXCEPTION (a queued instruction
 * raised an exception), EXECOVERRUN, and — on NX cores — any pending
 * imprecise exception.  If any condition is present, the sticky DSR bits
 * are cleared (plus NX imprecise state when needed) and ERROR_FAIL is
 * returned; otherwise ERROR_OK.  Error logging is suppressed when
 * xtensa->suppress_dsr_errors is set, for callers that expect failures.
 */
int xtensa_core_status_check(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res, needclear = 0, needimprclear = 0;

	xtensa_dm_core_status_read(&xtensa->dbg_mod);
	xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
	LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
	if (dsr & OCDDSR_EXECBUSY) {
		if (!xtensa->suppress_dsr_errors)
			LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
		needclear = 1;
	}
	if (dsr & OCDDSR_EXECEXCEPTION) {
		if (!xtensa->suppress_dsr_errors)
			LOG_TARGET_ERROR(target,
				"DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
				dsr);
		needclear = 1;
	}
	if (dsr & OCDDSR_EXECOVERRUN) {
		if (!xtensa->suppress_dsr_errors)
			LOG_TARGET_ERROR(target,
				"DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
				dsr);
		needclear = 1;
	}
	if (xtensa->core_config->core_type == XT_NX && (xtensa_imprecise_exception_occurred(target))) {
		if (!xtensa->suppress_dsr_errors)
			LOG_TARGET_ERROR(target,
				"%s: Imprecise exception occurred!", target_name(target));
		needclear = 1;
		needimprclear = 1;
	}
	if (needclear) {
		res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
			OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
		if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
			LOG_TARGET_ERROR(target, "clearing DSR failed!");
		if (xtensa->core_config->core_type == XT_NX && needimprclear)
			xtensa_imprecise_exception_clear(target);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
997 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
999 struct xtensa *xtensa = target_to_xtensa(target);
1000 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1001 return xtensa_reg_get_value(reg);
1004 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
1006 struct xtensa *xtensa = target_to_xtensa(target);
1007 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1008 if (xtensa_reg_get_value(reg) == value)
1009 return;
1010 xtensa_reg_set_value(reg, value);
/* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* WINDOWBASE is a fixed register on LX; on NX it is the optional WB register */
	uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
		XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
	uint32_t windowbase = (xtensa->core_config->windowed ?
		xtensa_reg_get(target, wb_idx) : 0);
	/* On NX only the P field of WB holds the window pointer */
	if (xtensa->core_config->core_type == XT_NX)
		windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
	/* Map the window-relative Ax index to the canonical ARx index, then set both */
	int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
	xtensa_reg_set(target, a_idx, value);
	xtensa_reg_set(target, ar_idx, value);
}
1028 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1029 uint32_t xtensa_cause_get(struct target *target)
1031 struct xtensa *xtensa = target_to_xtensa(target);
1032 if (xtensa->core_config->core_type == XT_LX) {
1033 /* LX cause in DEBUGCAUSE */
1034 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1036 if (xtensa->nx_stop_cause & DEBUGCAUSE_VALID)
1037 return xtensa->nx_stop_cause;
1039 /* NX cause determined from DSR.StopCause */
1040 if (xtensa_dm_core_status_read(&xtensa->dbg_mod) != ERROR_OK) {
1041 LOG_TARGET_ERROR(target, "Read DSR error");
1042 } else {
1043 uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1044 /* NX causes are prioritized; only 1 bit can be set */
1045 switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1046 case OCDDSR_STOPCAUSE_DI:
1047 xtensa->nx_stop_cause = DEBUGCAUSE_DI;
1048 break;
1049 case OCDDSR_STOPCAUSE_SS:
1050 xtensa->nx_stop_cause = DEBUGCAUSE_IC;
1051 break;
1052 case OCDDSR_STOPCAUSE_IB:
1053 xtensa->nx_stop_cause = DEBUGCAUSE_IB;
1054 break;
1055 case OCDDSR_STOPCAUSE_B:
1056 case OCDDSR_STOPCAUSE_B1:
1057 xtensa->nx_stop_cause = DEBUGCAUSE_BI;
1058 break;
1059 case OCDDSR_STOPCAUSE_BN:
1060 xtensa->nx_stop_cause = DEBUGCAUSE_BN;
1061 break;
1062 case OCDDSR_STOPCAUSE_DB0:
1063 case OCDDSR_STOPCAUSE_DB1:
1064 xtensa->nx_stop_cause = DEBUGCAUSE_DB;
1065 break;
1066 default:
1067 LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1068 break;
1070 if (xtensa->nx_stop_cause)
1071 xtensa->nx_stop_cause |= DEBUGCAUSE_VALID;
1073 return xtensa->nx_stop_cause;
/* Clear the cached halt cause.
 *
 * LX: zero DEBUGCAUSE in the register cache and un-dirty the entry so the
 * zero is not written back to hardware.
 * NX: DSR.StopCause cannot be written; reset the cached copy to just
 * DEBUGCAUSE_VALID so it reads as "no cause" while remaining valid.
 */
void xtensa_cause_clear(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa->core_config->core_type == XT_LX) {
		xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
		xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
	} else {
		/* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
		xtensa->nx_stop_cause = DEBUGCAUSE_VALID;
	}
}
1088 void xtensa_cause_reset(struct target *target)
1090 /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1091 struct xtensa *xtensa = target_to_xtensa(target);
1092 xtensa->nx_stop_cause = 0;
/* Assert core reset through the debug module's power-control register.
 *
 * All power domains stay awake and JTAG debug use stays claimed while
 * CORERESET is set.  On success the register cache is invalidated (state
 * is lost across reset) and the target moves to TARGET_RESET.
 */
int xtensa_assert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;

	/* registers are now invalid */
	xtensa->reset_asserted = true;
	register_cache_invalidate(xtensa->core_cache);
	target->state = TARGET_RESET;
	return ERROR_OK;
}
/* Release core reset.
 *
 * When reset_halt is requested, a debug interrupt is armed first (via
 * DCRSET) so the core halts right after leaving reset.  The power-control
 * write mirrors xtensa_assert_reset() but without the CORERESET bit.
 */
int xtensa_deassert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
	if (target->reset_halt)
		xtensa_queue_dbg_reg_write(xtensa,
			XDMREG_DCRSET,
			OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	target->state = TARGET_RUNNING;
	xtensa->reset_asserted = false;
	return res;
}
/* Soft reset-halt: implemented by asserting reset via the debug module.
 * NOTE(review): the halt itself presumably happens when reset is later
 * deasserted with reset_halt set — confirm against the reset flow. */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
/* Read the entire register set from the target into the register cache.
 *
 * For speed, all hardware reads are queued in large batches, executed in
 * one go, and then decoded from local buffers.  Sequence:
 *   1. Save A3 (and on NX also A0 and MS) so they can be used as scratch.
 *   2. Read all physical ARs, rotating the window 16 registers at a time.
 *   3. If coprocessors exist, read CPENABLE first and temporarily enable
 *      all coprocessors so FP/user registers become readable.
 *   4. Read special/user/FP registers one by one via A3 as scratch.
 *   5. Optionally check per-register DSR status, decode the windowed
 *      register mapping, and populate the cache.
 *   6. Restore A3 (and A0 on NX) in the cache and mark them dirty so the
 *      saved values are written back on resume.
 *
 * Returns ERROR_OK, ERROR_FAIL on allocation or register-read exception,
 * or a queue-execution error code.
 */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
	unsigned int ms_idx = reg_list_size;
	uint32_t ms = 0;
	uint32_t woe;
	uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
	/* Per-register DSR checks only on first fetch or at debug log level */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	if (xtensa->core_config->core_type == XT_NX) {
		/* Save (windowed) A0 as well--it will be required for reading PC */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a0_buf);

		/* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
		 * in correct order even for reversed register groups (overflow/underflow).
		 */
		ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
		uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
		/* Save the original MS value via A3/DDR before overriding it */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, ms_regno, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, ms_buf);
		LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, XT_MS_DISPST_DBG);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
	}

	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/*Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed) {
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were.
			 * Each ROTW rotates 4 registers on LX and 8 on NX */
			int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
		}
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	a3 = buf_get_u32(a3_buf, 0, 32);
	if (xtensa->core_config->core_type == XT_NX) {
		a0 = buf_get_u32(a0_buf, 0, 32);
		ms = buf_get_u32(ms_buf, 0, 32);
	}

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					if (xtensa->core_config->core_type == XT_LX) {
						/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
						reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
						xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
					} else {
						/* NX PC read through CALL0(0) and reading A0 */
						xtensa_queue_exec_ins(xtensa, XT_INS_CALL0(xtensa, 0));
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
						reg_fetched = false;
					}
				} else if ((xtensa->core_config->core_type == XT_LX)
					&& (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num)) {
					/* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
					reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				} else {
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				}
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Common tail: move A3 through DDR into the host-side buffer */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We need the windowbase to decode the general addresses. */
		uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
			XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
		windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
		if (xtensa->core_config->core_type == XT_NX)
			windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
	}

	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				/* Window-relative Ax registers alias the AR read at reg_num */
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				/* CPENABLE was overridden above, so its cache entry stays dirty */
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
					xtensa->core_config->core_type == XT_NX) {
					/* A0 from prior CALL0 points to next instruction; decrement it */
					regval -= 3;
					is_dirty = 1;
				} else if (i == ms_idx) {
					LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
					regval = ms;
					is_dirty = 1;
				}
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/*always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (xtensa->core_config->core_type == XT_NX) {
		xtensa_reg_set(target, XT_REG_IDX_A0, a0);
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A0);
	}
	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1423 int xtensa_get_gdb_reg_list(struct target *target,
1424 struct reg **reg_list[],
1425 int *reg_list_size,
1426 enum target_register_class reg_class)
1428 struct xtensa *xtensa = target_to_xtensa(target);
1429 unsigned int num_regs;
1431 if (reg_class == REG_CLASS_GENERAL) {
1432 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1433 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1434 return ERROR_FAIL;
1436 num_regs = xtensa->genpkt_regs_num;
1437 } else {
1438 /* Determine whether to return a contiguous or sparse register map */
1439 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1442 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1444 *reg_list = calloc(num_regs, sizeof(struct reg *));
1445 if (!*reg_list)
1446 return ERROR_FAIL;
1448 *reg_list_size = num_regs;
1449 if (xtensa->regmap_contiguous) {
1450 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1451 for (unsigned int i = 0; i < num_regs; i++)
1452 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1453 return ERROR_OK;
1456 for (unsigned int i = 0; i < num_regs; i++)
1457 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1458 unsigned int k = 0;
1459 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1460 if (xtensa->core_cache->reg_list[i].exist) {
1461 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1462 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1463 int sparse_idx = rlist[ridx].dbreg_num;
1464 if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1465 if (xtensa->eps_dbglevel_idx == 0) {
1466 LOG_ERROR("eps_dbglevel_idx not set\n");
1467 return ERROR_FAIL;
1469 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1470 if (xtensa_extra_debug_log)
1471 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1472 sparse_idx, xtensa->core_config->debug.irq_level,
1473 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1474 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1475 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1476 } else {
1477 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1479 if (i == XT_REG_IDX_PC)
1480 /* Make a duplicate copy of PC for external access */
1481 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1482 k++;
1486 if (k == num_regs)
1487 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1489 return ERROR_OK;
1492 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1494 struct xtensa *xtensa = target_to_xtensa(target);
1495 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1496 xtensa->core_config->mmu.dtlb_entries_count > 0;
1497 return ERROR_OK;
/* Request a halt by raising a debug interrupt, unless the core is already
 * stopped.
 *
 * DSR is read first: if the core is already in debug mode (or the target
 * was already marked halted) no command is issued.  Otherwise
 * OCDDCR_DEBUGINTERRUPT is set via DCRSET.
 */
int xtensa_halt(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "start");
	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}
	/* First we have to read dsr and check if the target stopped */
	int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to read core status!");
		return res;
	}
	LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
	if (!xtensa_is_stopped(target)) {
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
		xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
	}

	return res;
}
1527 int xtensa_prepare_resume(struct target *target,
1528 int current,
1529 target_addr_t address,
1530 int handle_breakpoints,
1531 int debug_execution)
1533 struct xtensa *xtensa = target_to_xtensa(target);
1534 uint32_t bpena = 0;
1536 LOG_TARGET_DEBUG(target,
1537 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1538 current,
1539 address,
1540 handle_breakpoints,
1541 debug_execution);
1543 if (target->state != TARGET_HALTED) {
1544 LOG_TARGET_WARNING(target, "target not halted");
1545 return ERROR_TARGET_NOT_HALTED;
1548 if (address && !current) {
1549 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1550 } else {
1551 uint32_t cause = xtensa_cause_get(target);
1552 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1553 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1554 if (cause & DEBUGCAUSE_DB)
1555 /* We stopped due to a watchpoint. We can't just resume executing the
1556 * instruction again because */
1557 /* that would trigger the watchpoint again. To fix this, we single-step,
1558 * which ignores watchpoints. */
1559 xtensa_do_step(target, current, address, handle_breakpoints);
1560 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1561 /* We stopped due to a break instruction. We can't just resume executing the
1562 * instruction again because */
1563 /* that would trigger the break again. To fix this, we single-step, which
1564 * ignores break. */
1565 xtensa_do_step(target, current, address, handle_breakpoints);
1568 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1569 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1570 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1571 if (xtensa->hw_brps[slot]) {
1572 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1573 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1574 if (xtensa->core_config->core_type == XT_NX)
1575 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, XT_IBREAKC_FB);
1576 bpena |= BIT(slot);
1579 if (xtensa->core_config->core_type == XT_LX)
1580 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1582 /* Here we write all registers to the targets */
1583 int res = xtensa_write_dirty_registers(target);
1584 if (res != ERROR_OK)
1585 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1586 return res;
/* Issue the actual resume: invalidate the cached stop cause and execute
 * RFDO (return from debug mode).  Dirty registers are expected to have
 * been written back beforehand (see xtensa_prepare_resume()). */
int xtensa_do_resume(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "start");

	xtensa_cause_reset(target);
	xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
		return res;
	}
	xtensa_core_status_check(target);
	return ERROR_OK;
}
/* Target resume entry point: prepare (PC / breakpoints / dirty-register
 * flush), issue the resume, update target state and fire the RESUMED
 * event.  debug_execution selects TARGET_DEBUG_RUNNING instead of
 * TARGET_RUNNING. */
int xtensa_resume(struct target *target,
	int current,
	target_addr_t address,
	int handle_breakpoints,
	int debug_execution)
{
	LOG_TARGET_DEBUG(target, "start");
	int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
		return res;
	}
	res = xtensa_do_resume(target);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to resume!");
		return res;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	if (!debug_execution)
		target->state = TARGET_RUNNING;
	else
		target->state = TARGET_DEBUG_RUNNING;

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	return ERROR_OK;
}
1635 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1637 struct xtensa *xtensa = target_to_xtensa(target);
1638 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1639 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1640 if (err != ERROR_OK)
1641 return false;
1643 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1644 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1645 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1646 return true;
1648 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1649 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1650 return true;
1652 return false;
/* Single-step one instruction on a halted core.
 * On LX this arms ICOUNT/ICOUNTLEVEL; on NX it sets DCR.StepRequest. The core
 * is then resumed and polled (up to ~500 ms) until it halts again. Handles
 * special cases: stepping over BREAK/BREAK.N, temporarily disabling
 * watchpoints that caused the halt, lowering PS.INTLEVEL to permit stepping,
 * and stepping out of window-exception handlers when ISR stepping is off.
 * Returns ERROR_OK, ERROR_TARGET_NOT_HALTED, or a lower-level error code. */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* LX needs a valid EPS[dbglevel] register index to save/restore PS */
	if (xtensa->eps_dbglevel_idx == 0 && xtensa->core_config->core_type == XT_LX) {
		LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
		xtensa->eps_dbglevel_idx : XT_REG_IDX_PS);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 (BREAK is 3 bytes) */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 (BREAK.N is 2 bytes) */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	do {
		if (xtensa->core_config->core_type == XT_LX) {
			/* Arm the instruction counter: with icount_val == -2 the core
			 * halts after one counted instruction */
			xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
			xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
		} else {
			/* NX: request a hardware single step via the debug control register */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_STEPREQUEST);
		}

		/* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
		 * we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1867 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1869 int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1870 if (retval != ERROR_OK)
1871 return retval;
1872 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1874 return ERROR_OK;
1878 * Returns true if two ranges are overlapping
1880 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1881 target_addr_t r1_end,
1882 target_addr_t r2_start,
1883 target_addr_t r2_end)
1885 if ((r2_start >= r1_start) && (r2_start < r1_end))
1886 return true; /* r2_start is in r1 region */
1887 if ((r2_end > r1_start) && (r2_end <= r1_end))
1888 return true; /* r2_end is in r1 region */
1889 return false;
1893 * Returns a size of overlapped region of two ranges.
1895 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1896 target_addr_t r1_end,
1897 target_addr_t r2_start,
1898 target_addr_t r2_end)
1900 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1901 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1902 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1903 return ov_end - ov_start;
1905 return 0;
1909 * Check if the address gets to memory regions, and its access mode
1911 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1913 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1914 target_addr_t adr_end = address + size; /* region end */
1915 target_addr_t overlap_size;
1916 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1918 while (adr_pos < adr_end) {
1919 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1920 if (!cm) /* address is not belong to anything */
1921 return false;
1922 if ((cm->access & access) != access) /* access check */
1923 return false;
1924 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1925 assert(overlap_size != 0);
1926 adr_pos += overlap_size;
1928 return true;
/* Read 'count' items of 'size' bytes from target memory at 'address' into
 * 'buffer', going through the debug module in aligned 32-bit words.
 * The request is widened to 4-byte alignment into a temporary buffer, read
 * either via the fast LDDR32.P auto-increment path or an L32I fallback
 * (scratch regs A3/A4 are marked dirty for later write-back), byte-swapped
 * on big-endian targets, then the requested sub-range is copied out.
 * If the fast path fails, it is disabled and the read retried recursively. */
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* We are going to read memory in 32-bit increments. This may not be what the calling
	 * function expects, so we may need to allocate a temp buffer and read into that first. */
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	uint8_t *albuff;
	bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Unless permissive mode is on, refuse reads outside readable regions */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
				XT_MEM_ACCESS_READ)) {
			LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
			return ERROR_FAIL;
		}
	}

	unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
	albuff = calloc(alloc_bytes, 1);
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Now we can safely read data from addrstart_al up to addrend_al into albuff */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: LDDR32.P loads and post-increments A3; reading DDREXEC
		 * re-executes it, so one queued read per word. The final word reads
		 * plain DDR so the instruction is not re-executed past the end. */
		xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
			xtensa_queue_dbg_reg_read(xtensa,
				(adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
				&albuff[i]);
	} else {
		/* Slow fallback: explicit L32I through A4, manually advancing A3 */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR errors during the status check: a failure here is used
		 * to decide whether the LDDR32.P probe worked, not reported directly */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;	/* probe succeeded: fast path confirmed */
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			/* Retry into albuff; the recursive call handles any swapping */
			res = xtensa_read_memory(target, address, size, count, albuff);
			bswap = false;
		} else {
			LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	}

	if (bswap)
		buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
	/* Extract the originally requested sub-range from the aligned buffer */
	memcpy(buffer, albuff + (address & 3), (size * count));
	free(albuff);
	return res;
}
2014 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2016 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2017 return xtensa_read_memory(target, address, 1, count, buffer);
/* Write 'count' items of 'size' bytes from 'buffer' to target memory at
 * 'address'. All writes go out as aligned 32-bit words: unaligned head/tail
 * words are first read back from the target and merged, big-endian targets
 * get their words byte-swapped, and the block is written via the fast
 * SDDR32.P path or an S32I fallback. On success, the affected range is
 * IHI-invalidated / DHWBI-flushed when the address is cacheable. A failing
 * fast path is disabled and the write retried recursively. */
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;
	bool fill_head_tail = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Unless permissive mode is on, refuse writes outside writable regions */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Head word: read it back so the bytes below 'address' survive */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Tail word: read it back so the bytes past the end survive */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			/* Head/tail words arrived target-endian; swap them to host order
			 * before merging in the caller's data */
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: SDDR32.P stores and post-increments A3; writing DDREXEC
		 * re-executes it for each subsequent word */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow fallback: explicit S32I through A4, manually advancing A3 */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR errors during the status check: a failure here is used
		 * to decide whether the SDDR32.P probe worked, not reported directly */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;	/* probe succeeded: fast path confirmed */
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
		uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
		if (issue_ihi || issue_dhwb) {
			uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwb)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	if (albuff != buffer)
		free(albuff);

	return res;
}
2219 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2221 /* xtensa_write_memory can handle everything. Just pass on to that. */
2222 return xtensa_write_memory(target, address, 1, count, buffer);
/* Memory checksum is not implemented for Xtensa: this stub only logs a
 * warning and returns ERROR_FAIL. Parameters are accepted but unused. */
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
{
	LOG_WARNING("not implemented yet");
	return ERROR_FAIL;
}
/* Periodic poll: refresh power/core status from the debug module and update
 * OpenOCD's notion of the target state (RESET / UNKNOWN / HALTED / RUNNING).
 * On a fresh halt it fetches registers, derives debug_reason from DEBUGCAUSE,
 * clears pending debug-interrupt bits, and (NX only) re-enables imprecise
 * exceptions. Also detects the end of an active TRAX trace session. */
int xtensa_poll(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read power status; the mask also clears the sticky was-reset bits */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		/* Debug logic lost its state: re-program the SMP break configuration */
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persistent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Allow a few polls for the core to come back before dropping 'examined' */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Acknowledge the pending debug-interrupt/break/TRAX/host bits */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
			if (xtensa->core_config->core_type == XT_NX) {
				/* Enable imprecise exceptions while in halted state */
				xtensa_reg_val_t ps = xtensa_reg_get(target, XT_REG_IDX_PS);
				xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
				xtensa_mark_register_dirty(xtensa, XT_REG_IDX_PS);
				LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, newps);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
				res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
				if (res != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
					return res;
				}
				xtensa_core_status_check(target);
			}
		}
	} else {
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
/* Patch up to one icache line of instruction bytes at 'address' while keeping
 * the caches coherent: flush+invalidate the affected dcache/icache line(s)
 * (two lines if the patch straddles a boundary), write the new bytes via
 * target_write_buffer(), then write back dcache so the fetch path sees them.
 * Uses A3 as scratch (marked dirty). Returns ERROR_OK or an error code. */
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
	unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
	/* UINT32_MAX line size when the cache op is not needed makes the
	 * same_*_line tests below trivially true */
	uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
	uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
	unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
	unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
	int ret;

	/* Patches larger than one icache line are not supported here */
	if (size > icache_line_size)
		return ERROR_FAIL;

	if (issue_ihi || issue_dhwbi) {
		/* We're going to use A3 here */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

		/* Write start address to A3 and invalidate */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
		if (issue_dhwbi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
			if (!same_dc_line) {
				LOG_TARGET_DEBUG(target,
					"DHWBI second dcache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
			}
		}
		if (issue_ihi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
			if (!same_ic_line) {
				LOG_TARGET_DEBUG(target,
					"IHI second icache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
			}
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
		if (ret != ERROR_OK) {
			LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
			return ret;
		}
	}

	/* Write new instructions to memory */
	ret = target_write_buffer(target, address, size, buffer);
	if (ret != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
		return ret;
	}

	if (issue_dhwbi) {
		/* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
		LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
		if (!same_dc_line) {
			LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
	}

	/* TODO: Handle L2 cache if present */
	return ret;
}
2442 static int xtensa_sw_breakpoint_add(struct target *target,
2443 struct breakpoint *breakpoint,
2444 struct xtensa_sw_breakpoint *sw_bp)
2446 struct xtensa *xtensa = target_to_xtensa(target);
2447 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2448 if (ret != ERROR_OK) {
2449 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2450 return ret;
2453 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2454 sw_bp->oocd_bp = breakpoint;
2456 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2458 /* Underlying memory write will convert instruction endianness, don't do that here */
2459 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2460 if (ret != ERROR_OK) {
2461 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2462 return ret;
2465 return ERROR_OK;
2468 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2470 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2471 if (ret != ERROR_OK) {
2472 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2473 return ret;
2475 sw_bp->oocd_bp = NULL;
2476 return ERROR_OK;
2479 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2481 struct xtensa *xtensa = target_to_xtensa(target);
2482 unsigned int slot;
2484 if (breakpoint->type == BKPT_SOFT) {
2485 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2486 if (!xtensa->sw_brps[slot].oocd_bp ||
2487 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2488 break;
2490 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2491 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2492 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2494 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2495 if (ret != ERROR_OK) {
2496 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2497 return ret;
2499 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2500 slot,
2501 breakpoint->address);
2502 return ERROR_OK;
2505 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2506 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2507 break;
2509 if (slot == xtensa->core_config->debug.ibreaks_num) {
2510 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2511 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2514 xtensa->hw_brps[slot] = breakpoint;
2515 /* We will actually write the breakpoints when we resume the target. */
2516 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2517 slot,
2518 breakpoint->address);
2520 return ERROR_OK;
2523 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2525 struct xtensa *xtensa = target_to_xtensa(target);
2526 unsigned int slot;
2528 if (breakpoint->type == BKPT_SOFT) {
2529 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2530 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2531 break;
2533 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2534 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2535 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2537 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2538 if (ret != ERROR_OK) {
2539 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2540 return ret;
2542 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2543 return ERROR_OK;
2546 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2547 if (xtensa->hw_brps[slot] == breakpoint)
2548 break;
2550 if (slot == xtensa->core_config->debug.ibreaks_num) {
2551 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2552 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2554 xtensa->hw_brps[slot] = NULL;
2555 if (xtensa->core_config->core_type == XT_NX)
2556 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, 0);
2557 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2558 return ERROR_OK;
2561 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2563 struct xtensa *xtensa = target_to_xtensa(target);
2564 unsigned int slot;
2565 xtensa_reg_val_t dbreakcval;
2567 if (target->state != TARGET_HALTED) {
2568 LOG_TARGET_WARNING(target, "target not halted");
2569 return ERROR_TARGET_NOT_HALTED;
2572 if (watchpoint->mask != ~(uint32_t)0) {
2573 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2574 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2577 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2578 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2579 break;
2581 if (slot == xtensa->core_config->debug.dbreaks_num) {
2582 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2583 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2586 /* Figure out value for dbreakc5..0
2587 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2588 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2589 !IS_PWR_OF_2(watchpoint->length) ||
2590 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2591 LOG_TARGET_WARNING(
2592 target,
2593 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2594 " not supported by hardware.",
2595 watchpoint->length,
2596 watchpoint->address);
2597 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2599 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2601 if (watchpoint->rw == WPT_READ)
2602 dbreakcval |= BIT(30);
2603 if (watchpoint->rw == WPT_WRITE)
2604 dbreakcval |= BIT(31);
2605 if (watchpoint->rw == WPT_ACCESS)
2606 dbreakcval |= BIT(30) | BIT(31);
2608 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2609 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2610 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2611 xtensa->hw_wps[slot] = watchpoint;
2612 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2613 watchpoint->address);
2614 return ERROR_OK;
2617 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2619 struct xtensa *xtensa = target_to_xtensa(target);
2620 unsigned int slot;
2622 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2623 if (xtensa->hw_wps[slot] == watchpoint)
2624 break;
2626 if (slot == xtensa->core_config->debug.dbreaks_num) {
2627 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2628 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2630 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2631 xtensa->hw_wps[slot] = NULL;
2632 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2633 watchpoint->address);
2634 return ERROR_OK;
/* Build the OpenOCD register cache for this Xtensa target from the static
 * core register table (xtensa_regs) plus the per-config optional registers
 * (xtensa->optregs). Also allocates:
 *  - empty_regs: placeholder entries for unknown debugger register requests,
 *  - contiguous_regs_list: reg pointers ordered to match the gdb g-packet,
 *  - algo_context_backup: per-register buffers for algorithm runs.
 * Returns ERROR_OK, or ERROR_FAIL after freeing all partial allocations. */
static int xtensa_build_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	unsigned int last_dbreg_num = 0;

	/* Sanity check only: a mismatch indicates an inconsistent core config. */
	if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
		LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
			xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);

	struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));

	if (!reg_cache) {
		LOG_ERROR("Failed to alloc reg cache!");
		return ERROR_FAIL;
	}
	reg_cache->name = "Xtensa registers";
	reg_cache->next = NULL;
	/* Init reglist */
	unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
	struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
	if (!reg_list) {
		LOG_ERROR("Failed to alloc reg list!");
		goto fail;
	}
	xtensa->dbregs_num = 0;
	unsigned int didx = 0;
	/* Pass 0 populates from the fixed core table, pass 1 from the optional
	 * (config-dependent) register descriptors. */
	for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
		struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
		unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
		for (unsigned int i = 0; i < listsize; i++, didx++) {
			reg_list[didx].exist = rlist[i].exist;
			reg_list[didx].name = rlist[i].name;
			reg_list[didx].size = 32;
			reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
			if (!reg_list[didx].value) {
				LOG_ERROR("Failed to alloc reg list value!");
				goto fail;
			}
			reg_list[didx].dirty = false;
			reg_list[didx].valid = false;
			reg_list[didx].type = &xtensa_reg_type;
			reg_list[didx].arch_info = xtensa;
			/* Track the highest debugger register number among existing regs;
			 * it determines the size of the empty-register fallback table. */
			if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
				last_dbreg_num = rlist[i].dbreg_num;
			if (xtensa_extra_debug_log) {
				LOG_TARGET_DEBUG(target,
					"POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
					reg_list[didx].name,
					whichlist,
					reg_list[didx].exist,
					didx,
					rlist[i].type,
					rlist[i].dbreg_num);
			}
		}
	}

	xtensa->dbregs_num = last_dbreg_num + 1;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = reg_list_size;

	LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
		xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);

	/* Construct empty-register list for handling unknown register requests */
	xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
	if (!xtensa->empty_regs) {
		LOG_TARGET_ERROR(target, "ERROR: Out of memory");
		goto fail;
	}
	for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
		/* 8 bytes: "?0xNNNN" plus NUL terminator */
		xtensa->empty_regs[i].name = calloc(8, sizeof(char));
		if (!xtensa->empty_regs[i].name) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
		xtensa->empty_regs[i].size = 32;
		xtensa->empty_regs[i].type = &xtensa_reg_type;
		xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
		if (!xtensa->empty_regs[i].value) {
			LOG_ERROR("Failed to alloc empty reg list value!");
			goto fail;
		}
		xtensa->empty_regs[i].arch_info = xtensa;
	}

	/* Construct contiguous register list from contiguous descriptor list */
	if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
		if (!xtensa->contiguous_regs_list) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		/* Match each contiguous descriptor to its cache entry by name. */
		for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
			unsigned int j;
			for (j = 0; j < reg_cache->num_regs; j++) {
				if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
					/* Register number field is not filled above.
					   Here we are assigning the corresponding index from the contiguous reg list.
					   These indexes are in the same order with gdb g-packet request/response.
					   Some more changes may be required for sparse reg lists.
					*/
					reg_cache->reg_list[j].number = i;
					xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
					LOG_TARGET_DEBUG(target,
						"POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
						xtensa->contiguous_regs_list[i]->name,
						xtensa->contiguous_regs_desc[i]->dbreg_num);
					break;
				}
			}
			if (j == reg_cache->num_regs)
				LOG_TARGET_WARNING(target, "contiguous register %s not found",
					xtensa->contiguous_regs_desc[i]->name);
		}
	}

	xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
	if (!xtensa->algo_context_backup) {
		LOG_ERROR("Failed to alloc mem for algorithm context backup!");
		goto fail;
	}
	for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
		struct reg *reg = &reg_cache->reg_list[i];
		xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
		if (!xtensa->algo_context_backup[i]) {
			LOG_ERROR("Failed to alloc mem for algorithm context!");
			goto fail;
		}
	}
	/* core_cache is set last so partially built caches are never visible. */
	xtensa->core_cache = reg_cache;
	if (cache_p)
		*cache_p = reg_cache;
	return ERROR_OK;

fail:
	/* Unwind every partial allocation made above; order mirrors creation. */
	if (reg_list) {
		for (unsigned int i = 0; i < reg_list_size; i++)
			free(reg_list[i].value);
		free(reg_list);
	}
	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	if (xtensa->algo_context_backup) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(xtensa->algo_context_backup[i]);
		free(xtensa->algo_context_backup);
	}
	free(reg_cache);

	return ERROR_FAIL;
}
/* Parse a colon-separated list of TIE opcode sequences from a gdb query
 * (":<oplen>:<op[0]>:<op[1]>:...", repeated) and queue each sequence for
 * execution on the target.  strtoul() advances @opstr in place as it parses.
 * Returns ERROR_OK if at least one well-formed sequence was queued,
 * ERROR_COMMAND_ARGUMENT_INVALID otherwise.  The queued instructions are
 * executed later by the caller via xtensa_dm_queue_execute(). */
static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
	/* Process op[] list */
	while (opstr && (*opstr == ':')) {
		uint8_t ops[32];
		unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
		if (oplen > 32) {
			/* ops[] holds at most 32 bytes; reject longer sequences. */
			LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
			break;
		}
		/* Collect exactly oplen ':'-prefixed hex opcode bytes. */
		unsigned int i = 0;
		while ((i < oplen) && opstr && (*opstr == ':'))
			ops[i++] = strtoul(opstr + 1, &opstr, 16);
		if (i != oplen) {
			LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
			break;
		}

		/* Trace the exact byte sequence being queued (fits: 27 + 32*3 < 128). */
		char insn_buf[128];
		sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
		for (i = 0; i < oplen; i++)
			sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
		LOG_TARGET_DEBUG(target, "%s", insn_buf);
		xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
		status = ERROR_OK;
	}
	return status;
}
/* Handle the custom gdb qxtreg/Qxtreg query: read or write a TIE register
 * by executing user-supplied TIE opcode sequences on the target, using a
 * previously configured spill memory area as the data buffer.
 * Sequence: save spill memory, (for writes) store the new value there,
 * point a4 at the spill area, run the TIE ops, restore a4, then restore
 * the spill memory and format the gdb response.
 * On failure, writes the matching xt_qerr response and returns its code. */
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	bool iswrite = (packet[0] == 'Q');	/* 'Q' = set, 'q' = get */
	enum xtensa_qerr_e error;

	/* Read/write TIE register. Requires spill location.
	 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
	 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
	 */
	if (!(xtensa->spill_buf)) {
		LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* Parse "<num>:<len>:" header; delim walks through the packet. */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
	memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
	LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
	/* Response is hex-encoded: 2 chars per byte plus NUL must fit. */
	if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
		LOG_ERROR("TIE register too large");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* (1) Save spill memory, (1.5) [if write then store value to spill location],
	 * (2) read old a4, (3) write spill address to a4.
	 * NOTE: ensure a4 is restored properly by all error handling logic
	 */
	/* Unaligned spill area forces byte-wide accesses. */
	unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
	int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory save");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (iswrite) {
		/* Extract value and store in spill memory */
		unsigned int b = 0;
		char *valbuf = strchr(delim, '=');
		if (!(valbuf && (*valbuf == '='))) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		valbuf++;
		/* Decode hex byte pairs until the value string ends. */
		while (*valbuf && *(valbuf + 1)) {
			char bytestr[3] = { 0, 0, 0 };
			strncpy(bytestr, valbuf, 2);
			regbuf[b++] = strtoul(bytestr, NULL, 16);
			valbuf += 2;
		}
		if (b != reglen) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
			reglen / memop_size, regbuf);
		if (status != ERROR_OK) {
			LOG_ERROR("TIE value store");
			error = XT_QERR_MEM;
			goto xtensa_gdbqc_qxtreg_fail;
		}
	}
	/* Point a4 at the spill area; the TIE ops use it as their data pointer. */
	xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));

	int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);

	/* Restore a4 but not yet spill memory. Execute it all... */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
		tieop_status = status;
	}
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
		tieop_status = status;
	}

	if (tieop_status == ERROR_OK) {
		if (iswrite) {
			/* TIE write succeeded; send OK */
			strcpy(*response_p, "OK");
		} else {
			/* TIE read succeeded; copy result from spill memory */
			status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
			if (status != ERROR_OK) {
				LOG_TARGET_ERROR(target, "TIE result read");
				tieop_status = status;
			}
			unsigned int i;
			for (i = 0; i < reglen; i++)
				sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
			*(*response_p + 2 * i) = '\0';
			LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
		}
	}

	/* Restore spill memory first, then report any previous errors */
	status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory restore");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (tieop_status != ERROR_OK) {
		LOG_ERROR("TIE execution");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	return ERROR_OK;

xtensa_gdbqc_qxtreg_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
2967 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2969 struct xtensa *xtensa = target_to_xtensa(target);
2970 enum xtensa_qerr_e error;
2971 if (!packet || !response_p) {
2972 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2973 return ERROR_FAIL;
2976 *response_p = xtensa->qpkt_resp;
2977 if (strncmp(packet, "qxtn", 4) == 0) {
2978 strcpy(*response_p, "OpenOCD");
2979 return ERROR_OK;
2980 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2981 return ERROR_OK;
2982 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2983 /* Confirm host cache params match core .cfg file */
2984 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2985 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2986 unsigned int line_size = 0, size = 0, way_count = 0;
2987 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2988 if ((cachep->line_size != line_size) ||
2989 (cachep->size != size) ||
2990 (cachep->way_count != way_count)) {
2991 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2992 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2994 strcpy(*response_p, "OK");
2995 return ERROR_OK;
2996 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2997 /* Confirm host IRAM/IROM params match core .cfg file */
2998 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2999 &xtensa->core_config->iram : &xtensa->core_config->irom;
3000 unsigned int base = 0, size = 0, i;
3001 char *pkt = (char *)&packet[7];
3002 do {
3003 pkt++;
3004 size = strtoul(pkt, &pkt, 16);
3005 pkt++;
3006 base = strtoul(pkt, &pkt, 16);
3007 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3008 for (i = 0; i < memp->count; i++) {
3009 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3010 break;
3012 if (i == memp->count) {
3013 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3014 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3015 break;
3017 for (i = 0; i < 11; i++) {
3018 pkt++;
3019 strtoul(pkt, &pkt, 16);
3021 } while (pkt && (pkt[0] == ','));
3022 strcpy(*response_p, "OK");
3023 return ERROR_OK;
3024 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3025 /* Confirm host EXCM_LEVEL matches core .cfg file */
3026 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3027 if (!xtensa->core_config->high_irq.enabled ||
3028 (excm_level != xtensa->core_config->high_irq.excm_level))
3029 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3030 strcpy(*response_p, "OK");
3031 return ERROR_OK;
3032 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3033 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3034 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3035 strcpy(*response_p, "OK");
3036 return ERROR_OK;
3037 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3038 char *delim;
3039 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3040 if (*delim != ':') {
3041 LOG_ERROR("Malformed Qxtspill packet");
3042 error = XT_QERR_INVAL;
3043 goto xtensa_gdb_query_custom_fail;
3045 xtensa->spill_loc = spill_loc;
3046 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3047 if (xtensa->spill_buf)
3048 free(xtensa->spill_buf);
3049 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3050 if (!xtensa->spill_buf) {
3051 LOG_ERROR("Spill buf alloc");
3052 error = XT_QERR_MEM;
3053 goto xtensa_gdb_query_custom_fail;
3055 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3056 strcpy(*response_p, "OK");
3057 return ERROR_OK;
3058 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3059 return xtensa_gdbqc_qxtreg(target, packet, response_p);
3060 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3061 (strncmp(packet, "qxtftie", 7) == 0) ||
3062 (strncmp(packet, "qxtstie", 7) == 0)) {
3063 /* Return empty string to indicate trace, TIE wire debug are unsupported */
3064 strcpy(*response_p, "");
3065 return ERROR_OK;
3068 /* Warn for all other queries, but do not return errors */
3069 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3070 strcpy(*response_p, "");
3071 return ERROR_OK;
3073 xtensa_gdb_query_custom_fail:
3074 strcpy(*response_p, xt_qerr[error].chrval);
3075 return xt_qerr[error].intval;
3078 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
3079 const struct xtensa_debug_module_config *dm_cfg)
3081 target->arch_info = xtensa;
3082 xtensa->common_magic = XTENSA_COMMON_MAGIC;
3083 xtensa->target = target;
3084 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
3086 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3087 if (!xtensa->core_config) {
3088 LOG_ERROR("Xtensa configuration alloc failed\n");
3089 return ERROR_FAIL;
3092 /* Default cache settings are disabled with 1 way */
3093 xtensa->core_config->icache.way_count = 1;
3094 xtensa->core_config->dcache.way_count = 1;
3096 /* chrval: AR3/AR4 register names will change with window mapping.
3097 * intval: tracks whether scratch register was set through gdb P packet.
3099 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3100 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3101 if (!xtensa->scratch_ars[s].chrval) {
3102 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3103 free(xtensa->scratch_ars[f].chrval);
3104 free(xtensa->core_config);
3105 LOG_ERROR("Xtensa scratch AR alloc failed\n");
3106 return ERROR_FAIL;
3108 xtensa->scratch_ars[s].intval = false;
3109 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3110 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3111 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3114 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3117 void xtensa_set_permissive_mode(struct target *target, bool state)
3119 target_to_xtensa(target)->permissive_mode = state;
3122 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3124 struct xtensa *xtensa = target_to_xtensa(target);
3126 xtensa->come_online_probes_num = 3;
3127 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3128 if (!xtensa->hw_brps) {
3129 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3130 return ERROR_FAIL;
3132 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3133 if (!xtensa->hw_wps) {
3134 free(xtensa->hw_brps);
3135 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3136 return ERROR_FAIL;
3138 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3139 if (!xtensa->sw_brps) {
3140 free(xtensa->hw_brps);
3141 free(xtensa->hw_wps);
3142 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3143 return ERROR_FAIL;
3146 xtensa->spill_loc = 0xffffffff;
3147 xtensa->spill_bytes = 0;
3148 xtensa->spill_buf = NULL;
3149 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3151 return xtensa_build_reg_cache(target);
/* Release everything allocated by xtensa_build_reg_cache(): the register
 * cache and per-register value buffers, the algorithm context backup, the
 * empty-register fallback table, and the optional-register descriptors.
 * Safe to call when the cache was never (fully) built. */
static void xtensa_free_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache *cache = xtensa->core_cache;

	if (cache) {
		register_unlink_cache(&target->reg_cache, cache);
		for (unsigned int i = 0; i < cache->num_regs; i++) {
			/* algo_context_backup is allocated alongside the cache in
			 * xtensa_build_reg_cache(), one entry per cached register. */
			free(xtensa->algo_context_backup[i]);
			free(cache->reg_list[i].value);
		}
		free(xtensa->algo_context_backup);
		free(cache->reg_list);
		free(cache);
	}
	xtensa->core_cache = NULL;
	xtensa->algo_context_backup = NULL;

	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			/* names were heap-allocated in xtensa_build_reg_cache(); cast away const */
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	xtensa->empty_regs = NULL;
	if (xtensa->optregs) {
		for (unsigned int i = 0; i < xtensa->num_optregs; i++)
			free((void *)xtensa->optregs[i].name);
		free(xtensa->optregs);
	}
	xtensa->optregs = NULL;
}
3188 void xtensa_target_deinit(struct target *target)
3190 struct xtensa *xtensa = target_to_xtensa(target);
3192 LOG_DEBUG("start");
3194 if (target_was_examined(target)) {
3195 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
3196 if (ret != ERROR_OK) {
3197 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3198 return;
3200 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
3201 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3202 if (ret != ERROR_OK) {
3203 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3204 return;
3206 xtensa_dm_deinit(&xtensa->dbg_mod);
3208 xtensa_free_reg_cache(target);
3209 free(xtensa->hw_brps);
3210 free(xtensa->hw_wps);
3211 free(xtensa->sw_brps);
3212 if (xtensa->spill_buf) {
3213 free(xtensa->spill_buf);
3214 xtensa->spill_buf = NULL;
3216 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3217 free(xtensa->scratch_ars[s].chrval);
3218 free(xtensa->core_config);
/* Architecture name reported to GDB; the same for every Xtensa variant. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	(void)target;	/* unused; required by the target API signature */
	return "xtensa";
}
3226 /* exe <ascii-encoded hexadecimal instruction bytes> */
/* Implementation of "xtensa exe": decode an ASCII-hex instruction byte
 * string (even length, < 64 chars => at most 32 bytes) and execute it on
 * the halted target.  CPENABLE is forced on (all-ones) for the duration so
 * coprocessor instructions work, then EXCCAUSE and CPENABLE are restored
 * from the values cached before execution.  Returns the execution status. */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_FAIL;
	}

	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	/* Two hex chars per byte; third slot stays NUL for strtoul(). */
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Stage all-ones through DDR -> a3 -> CPENABLE, then restore a3. */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
/* Dispatch "xtensa exe" to the helper for the currently-selected target. */
COMMAND_HANDLER(xtensa_cmd_exe)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
}
3295 /* xtdef <name> */
3296 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3298 if (CMD_ARGC != 1)
3299 return ERROR_COMMAND_SYNTAX_ERROR;
3301 const char *core_name = CMD_ARGV[0];
3302 if (strcasecmp(core_name, "LX") == 0) {
3303 xtensa->core_config->core_type = XT_LX;
3304 } else if (strcasecmp(core_name, "NX") == 0) {
3305 xtensa->core_config->core_type = XT_NX;
3306 } else {
3307 LOG_ERROR("xtdef [LX|NX]\n");
3308 return ERROR_COMMAND_SYNTAX_ERROR;
3310 return ERROR_OK;
/* Dispatch "xtensa xtdef" to the helper for the currently-selected target. */
COMMAND_HANDLER(xtensa_cmd_xtdef)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3319 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3321 if ((val < min) || (val > max)) {
3322 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3323 return false;
3325 return true;
3328 /* xtopt <name> <value> */
3329 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3331 if (CMD_ARGC != 2)
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3334 const char *opt_name = CMD_ARGV[0];
3335 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3336 if (strcasecmp(opt_name, "arnum") == 0) {
3337 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3338 return ERROR_COMMAND_ARGUMENT_INVALID;
3339 xtensa->core_config->aregs_num = opt_val;
3340 } else if (strcasecmp(opt_name, "windowed") == 0) {
3341 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3342 return ERROR_COMMAND_ARGUMENT_INVALID;
3343 xtensa->core_config->windowed = opt_val;
3344 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3345 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3346 return ERROR_COMMAND_ARGUMENT_INVALID;
3347 xtensa->core_config->coproc = opt_val;
3348 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3349 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3350 return ERROR_COMMAND_ARGUMENT_INVALID;
3351 xtensa->core_config->exceptions = opt_val;
3352 } else if (strcasecmp(opt_name, "intnum") == 0) {
3353 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3354 return ERROR_COMMAND_ARGUMENT_INVALID;
3355 xtensa->core_config->irq.enabled = (opt_val > 0);
3356 xtensa->core_config->irq.irq_num = opt_val;
3357 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3358 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3359 return ERROR_COMMAND_ARGUMENT_INVALID;
3360 xtensa->core_config->high_irq.enabled = opt_val;
3361 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3362 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3363 return ERROR_COMMAND_ARGUMENT_INVALID;
3364 if (!xtensa->core_config->high_irq.enabled) {
3365 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3366 return ERROR_COMMAND_ARGUMENT_INVALID;
3368 xtensa->core_config->high_irq.excm_level = opt_val;
3369 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3370 if (xtensa->core_config->core_type == XT_LX) {
3371 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3372 return ERROR_COMMAND_ARGUMENT_INVALID;
3373 } else {
3374 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3375 return ERROR_COMMAND_ARGUMENT_INVALID;
3377 if (!xtensa->core_config->high_irq.enabled) {
3378 LOG_ERROR("xtopt intlevels requires hipriints\n");
3379 return ERROR_COMMAND_ARGUMENT_INVALID;
3381 xtensa->core_config->high_irq.level_num = opt_val;
3382 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3383 if (xtensa->core_config->core_type == XT_LX) {
3384 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3385 return ERROR_COMMAND_ARGUMENT_INVALID;
3386 } else {
3387 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3388 return ERROR_COMMAND_ARGUMENT_INVALID;
3390 xtensa->core_config->debug.enabled = 1;
3391 xtensa->core_config->debug.irq_level = opt_val;
3392 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3393 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3394 return ERROR_COMMAND_ARGUMENT_INVALID;
3395 xtensa->core_config->debug.ibreaks_num = opt_val;
3396 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3397 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3398 return ERROR_COMMAND_ARGUMENT_INVALID;
3399 xtensa->core_config->debug.dbreaks_num = opt_val;
3400 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3401 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3402 return ERROR_COMMAND_ARGUMENT_INVALID;
3403 xtensa->core_config->trace.mem_sz = opt_val;
3404 xtensa->core_config->trace.enabled = (opt_val > 0);
3405 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3406 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3407 return ERROR_COMMAND_ARGUMENT_INVALID;
3408 xtensa->core_config->trace.reversed_mem_access = opt_val;
3409 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3410 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3411 return ERROR_COMMAND_ARGUMENT_INVALID;
3412 xtensa->core_config->debug.perfcount_num = opt_val;
3413 } else {
3414 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3415 return ERROR_OK;
3418 return ERROR_OK;
3421 COMMAND_HANDLER(xtensa_cmd_xtopt)
3423 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3424 target_to_xtensa(get_current_target(CMD_CTX)));
3427 /* xtmem <type> [parameters] */
3428 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3430 struct xtensa_cache_config *cachep = NULL;
3431 struct xtensa_local_mem_config *memp = NULL;
3432 int mem_access = 0;
3433 bool is_dcache = false;
3435 if (CMD_ARGC == 0) {
3436 LOG_ERROR("xtmem <type> [parameters]\n");
3437 return ERROR_COMMAND_SYNTAX_ERROR;
3440 const char *mem_name = CMD_ARGV[0];
3441 if (strcasecmp(mem_name, "icache") == 0) {
3442 cachep = &xtensa->core_config->icache;
3443 } else if (strcasecmp(mem_name, "dcache") == 0) {
3444 cachep = &xtensa->core_config->dcache;
3445 is_dcache = true;
3446 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3447 /* TODO: support L2 cache */
3448 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3449 /* TODO: support L2 cache */
3450 } else if (strcasecmp(mem_name, "iram") == 0) {
3451 memp = &xtensa->core_config->iram;
3452 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3453 } else if (strcasecmp(mem_name, "dram") == 0) {
3454 memp = &xtensa->core_config->dram;
3455 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3456 } else if (strcasecmp(mem_name, "sram") == 0) {
3457 memp = &xtensa->core_config->sram;
3458 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3459 } else if (strcasecmp(mem_name, "irom") == 0) {
3460 memp = &xtensa->core_config->irom;
3461 mem_access = XT_MEM_ACCESS_READ;
3462 } else if (strcasecmp(mem_name, "drom") == 0) {
3463 memp = &xtensa->core_config->drom;
3464 mem_access = XT_MEM_ACCESS_READ;
3465 } else if (strcasecmp(mem_name, "srom") == 0) {
3466 memp = &xtensa->core_config->srom;
3467 mem_access = XT_MEM_ACCESS_READ;
3468 } else {
3469 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3470 return ERROR_COMMAND_ARGUMENT_INVALID;
3473 if (cachep) {
3474 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3475 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3476 return ERROR_COMMAND_SYNTAX_ERROR;
3478 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3479 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3480 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3481 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3482 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3483 } else if (memp) {
3484 if (CMD_ARGC != 3) {
3485 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3488 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3489 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3490 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3491 memcfgp->access = mem_access;
3492 memp->count++;
3495 return ERROR_OK;
3498 COMMAND_HANDLER(xtensa_cmd_xtmem)
3500 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3501 target_to_xtensa(get_current_target(CMD_CTX)));
3504 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3505 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3507 if (CMD_ARGC != 4) {
3508 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3509 return ERROR_COMMAND_SYNTAX_ERROR;
3512 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3513 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3514 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3515 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3517 if ((nfgseg > 32)) {
3518 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3519 return ERROR_COMMAND_ARGUMENT_INVALID;
3520 } else if (minsegsize & (minsegsize - 1)) {
3521 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3522 return ERROR_COMMAND_ARGUMENT_INVALID;
3523 } else if (lockable > 1) {
3524 LOG_ERROR("<lockable> must be 0 or 1\n");
3525 return ERROR_COMMAND_ARGUMENT_INVALID;
3526 } else if (execonly > 1) {
3527 LOG_ERROR("<execonly> must be 0 or 1\n");
3528 return ERROR_COMMAND_ARGUMENT_INVALID;
3531 xtensa->core_config->mpu.enabled = true;
3532 xtensa->core_config->mpu.nfgseg = nfgseg;
3533 xtensa->core_config->mpu.minsegsize = minsegsize;
3534 xtensa->core_config->mpu.lockable = lockable;
3535 xtensa->core_config->mpu.execonly = execonly;
3536 return ERROR_OK;
3539 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3541 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3542 target_to_xtensa(get_current_target(CMD_CTX)));
3545 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3546 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3548 if (CMD_ARGC != 2) {
3549 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3550 return ERROR_COMMAND_SYNTAX_ERROR;
3553 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3554 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3555 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3556 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3557 return ERROR_COMMAND_ARGUMENT_INVALID;
3558 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3559 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3560 return ERROR_COMMAND_ARGUMENT_INVALID;
3563 xtensa->core_config->mmu.enabled = true;
3564 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3565 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3566 return ERROR_OK;
3569 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3571 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3572 target_to_xtensa(get_current_target(CMD_CTX)));
3575 /* xtregs <numregs>
3576 * xtreg <regname> <regnum> */
3577 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3579 if (CMD_ARGC == 1) {
3580 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3581 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3582 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3585 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3586 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3587 numregs, xtensa->genpkt_regs_num);
3588 return ERROR_COMMAND_SYNTAX_ERROR;
3590 xtensa->total_regs_num = numregs;
3591 xtensa->core_regs_num = 0;
3592 xtensa->num_optregs = 0;
3593 /* A little more memory than required, but saves a second initialization pass */
3594 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3595 if (!xtensa->optregs) {
3596 LOG_ERROR("Failed to allocate xtensa->optregs!");
3597 return ERROR_FAIL;
3599 return ERROR_OK;
3600 } else if (CMD_ARGC != 2) {
3601 return ERROR_COMMAND_SYNTAX_ERROR;
3604 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3605 * if general register (g-packet) requests or contiguous register maps are supported */
3606 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3607 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3608 if (!xtensa->contiguous_regs_desc) {
3609 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3610 return ERROR_FAIL;
3614 const char *regname = CMD_ARGV[0];
3615 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3616 if (regnum > UINT16_MAX) {
3617 LOG_ERROR("<regnum> must be a 16-bit number");
3618 return ERROR_COMMAND_ARGUMENT_INVALID;
3621 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3622 if (xtensa->total_regs_num)
3623 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3624 regname, regnum,
3625 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3626 else
3627 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3628 regname, regnum);
3629 return ERROR_FAIL;
3632 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3633 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3634 bool is_extended_reg = true;
3635 unsigned int ridx;
3636 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3637 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3638 /* Flag core register as defined */
3639 rptr = &xtensa_regs[ridx];
3640 xtensa->core_regs_num++;
3641 is_extended_reg = false;
3642 break;
3646 rptr->exist = true;
3647 if (is_extended_reg) {
3648 /* Register ID, debugger-visible register ID */
3649 rptr->name = strdup(CMD_ARGV[0]);
3650 rptr->dbreg_num = regnum;
3651 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3652 xtensa->num_optregs++;
3654 /* Register type */
3655 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3656 rptr->type = XT_REG_GENERAL;
3657 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3658 rptr->type = XT_REG_USER;
3659 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3660 rptr->type = XT_REG_FR;
3661 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3662 rptr->type = XT_REG_SPECIAL;
3663 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3664 /* WARNING: For these registers, regnum points to the
3665 * index of the corresponding ARx registers, NOT to
3666 * the processor register number! */
3667 rptr->type = XT_REG_RELGEN;
3668 rptr->reg_num += XT_REG_IDX_ARFIRST;
3669 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3670 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3671 rptr->type = XT_REG_TIE;
3672 } else {
3673 rptr->type = XT_REG_OTHER;
3676 /* Register flags */
3677 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3678 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3679 (strcmp(rptr->name, "intclear") == 0))
3680 rptr->flags = XT_REGF_NOREAD;
3681 else
3682 rptr->flags = 0;
3684 if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
3685 xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3686 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3687 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3689 if (xtensa->core_config->core_type == XT_NX) {
3690 enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_NUM;
3691 if (strcmp(rptr->name, "ibreakc0") == 0)
3692 idx = XT_NX_REG_IDX_IBREAKC0;
3693 else if (strcmp(rptr->name, "wb") == 0)
3694 idx = XT_NX_REG_IDX_WB;
3695 else if (strcmp(rptr->name, "ms") == 0)
3696 idx = XT_NX_REG_IDX_MS;
3697 else if (strcmp(rptr->name, "ievec") == 0)
3698 idx = XT_NX_REG_IDX_IEVEC;
3699 else if (strcmp(rptr->name, "ieextern") == 0)
3700 idx = XT_NX_REG_IDX_IEEXTERN;
3701 else if (strcmp(rptr->name, "mesr") == 0)
3702 idx = XT_NX_REG_IDX_MESR;
3703 else if (strcmp(rptr->name, "mesrclr") == 0)
3704 idx = XT_NX_REG_IDX_MESRCLR;
3705 if (idx < XT_NX_REG_IDX_NUM) {
3706 if (xtensa->nx_reg_idx[idx] != 0) {
3707 LOG_ERROR("nx_reg_idx[%d] previously set to %d",
3708 idx, xtensa->nx_reg_idx[idx]);
3709 return ERROR_FAIL;
3711 xtensa->nx_reg_idx[idx] = XT_NUM_REGS + xtensa->num_optregs - 1;
3712 LOG_DEBUG("NX reg %s: index %d (%d)",
3713 rptr->name, xtensa->nx_reg_idx[idx], idx);
3716 } else if (strcmp(rptr->name, "cpenable") == 0) {
3717 xtensa->core_config->coproc = true;
3720 /* Build out list of contiguous registers in specified order */
3721 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3722 if (xtensa->contiguous_regs_desc) {
3723 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3724 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3726 if (xtensa_extra_debug_log)
3727 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3728 is_extended_reg ? "config-specific" : "core",
3729 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3730 is_extended_reg ? xtensa->num_optregs : ridx,
3731 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3732 return ERROR_OK;
3735 COMMAND_HANDLER(xtensa_cmd_xtreg)
3737 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3738 target_to_xtensa(get_current_target(CMD_CTX)));
3741 /* xtregfmt <contiguous|sparse> [numgregs] */
3742 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3744 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3745 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3746 return ERROR_OK;
3747 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3748 xtensa->regmap_contiguous = true;
3749 if (CMD_ARGC == 2) {
3750 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3751 if ((numgregs <= 0) ||
3752 ((numgregs > xtensa->total_regs_num) &&
3753 (xtensa->total_regs_num > 0))) {
3754 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3755 numgregs, xtensa->total_regs_num);
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3758 xtensa->genpkt_regs_num = numgregs;
3760 return ERROR_OK;
3763 return ERROR_COMMAND_SYNTAX_ERROR;
3766 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3768 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3769 target_to_xtensa(get_current_target(CMD_CTX)));
3772 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3774 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3775 &xtensa->permissive_mode, "xtensa permissive mode");
3778 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3780 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3781 target_to_xtensa(get_current_target(CMD_CTX)));
3784 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3785 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3787 struct xtensa_perfmon_config config = {
3788 .mask = 0xffff,
3789 .kernelcnt = 0,
3790 .tracelevel = -1 /* use DEBUGLEVEL by default */
3793 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3794 return ERROR_COMMAND_SYNTAX_ERROR;
3796 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3797 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3798 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3799 return ERROR_COMMAND_ARGUMENT_INVALID;
3802 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3803 if (config.select > XTENSA_MAX_PERF_SELECT) {
3804 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3805 return ERROR_COMMAND_ARGUMENT_INVALID;
3808 if (CMD_ARGC >= 3) {
3809 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3810 if (config.mask > XTENSA_MAX_PERF_MASK) {
3811 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3812 return ERROR_COMMAND_ARGUMENT_INVALID;
3816 if (CMD_ARGC >= 4) {
3817 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3818 if (config.kernelcnt > 1) {
3819 command_print(CMD, "kernelcnt should be 0 or 1");
3820 return ERROR_COMMAND_ARGUMENT_INVALID;
3824 if (CMD_ARGC >= 5) {
3825 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3826 if (config.tracelevel > 7) {
3827 command_print(CMD, "tracelevel should be <=7");
3828 return ERROR_COMMAND_ARGUMENT_INVALID;
3832 if (config.tracelevel == -1)
3833 config.tracelevel = xtensa->core_config->debug.irq_level;
3835 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3838 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3840 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3841 target_to_xtensa(get_current_target(CMD_CTX)));
3844 /* perfmon_dump [counter_id] */
3845 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3847 if (CMD_ARGC > 1)
3848 return ERROR_COMMAND_SYNTAX_ERROR;
3850 int counter_id = -1;
3851 if (CMD_ARGC == 1) {
3852 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3853 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
3854 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3855 return ERROR_COMMAND_ARGUMENT_INVALID;
3859 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3860 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3861 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3862 char result_buf[128] = { 0 };
3863 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3864 struct xtensa_perfmon_result result;
3865 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3866 if (res != ERROR_OK)
3867 return res;
3868 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3869 "%-12" PRIu64 "%s",
3870 result.value,
3871 result.overflow ? " (overflow)" : "");
3872 LOG_INFO("%s", result_buf);
3875 return ERROR_OK;
3878 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3880 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3881 target_to_xtensa(get_current_target(CMD_CTX)));
3884 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3886 int state = -1;
3888 if (CMD_ARGC < 1) {
3889 const char *st;
3890 state = xtensa->stepping_isr_mode;
3891 if (state == XT_STEPPING_ISR_ON)
3892 st = "OFF";
3893 else if (state == XT_STEPPING_ISR_OFF)
3894 st = "ON";
3895 else
3896 st = "UNKNOWN";
3897 command_print(CMD, "Current ISR step mode: %s", st);
3898 return ERROR_OK;
3901 if (xtensa->core_config->core_type == XT_NX) {
3902 command_print(CMD, "ERROR: ISR step mode only supported on Xtensa LX");
3903 return ERROR_FAIL;
3906 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3907 if (!strcasecmp(CMD_ARGV[0], "off"))
3908 state = XT_STEPPING_ISR_ON;
3909 else if (!strcasecmp(CMD_ARGV[0], "on"))
3910 state = XT_STEPPING_ISR_OFF;
3912 if (state == -1) {
3913 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3914 return ERROR_FAIL;
3916 xtensa->stepping_isr_mode = state;
3917 return ERROR_OK;
3920 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3922 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3923 target_to_xtensa(get_current_target(CMD_CTX)));
3926 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3928 int res;
3929 uint32_t val = 0;
3931 if (CMD_ARGC >= 1) {
3932 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3933 if (!strcasecmp(CMD_ARGV[0], "none")) {
3934 val = 0;
3935 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3936 val |= OCDDCR_BREAKINEN;
3937 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3938 val |= OCDDCR_BREAKOUTEN;
3939 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3940 val |= OCDDCR_RUNSTALLINEN;
3941 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3942 val |= OCDDCR_DEBUGMODEOUTEN;
3943 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3944 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3945 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3946 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3947 } else {
3948 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3949 command_print(
3950 CMD,
3951 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3952 return ERROR_OK;
3955 res = xtensa_smpbreak_set(target, val);
3956 if (res != ERROR_OK)
3957 command_print(CMD, "Failed to set smpbreak config %d", res);
3958 } else {
3959 struct xtensa *xtensa = target_to_xtensa(target);
3960 res = xtensa_smpbreak_read(xtensa, &val);
3961 if (res == ERROR_OK)
3962 command_print(CMD, "Current bits set:%s%s%s%s",
3963 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3964 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3965 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3966 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3968 else
3969 command_print(CMD, "Failed to get smpbreak config %d", res);
3971 return res;
3974 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3976 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3977 get_current_target(CMD_CTX));
3980 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3982 struct xtensa_trace_status trace_status;
3983 struct xtensa_trace_start_config cfg = {
3984 .stoppc = 0,
3985 .stopmask = XTENSA_STOPMASK_DISABLED,
3986 .after = 0,
3987 .after_is_words = false
3990 /* Parse arguments */
3991 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3992 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3993 char *e;
3994 i++;
3995 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3996 cfg.stopmask = 0;
3997 if (*e == '/')
3998 cfg.stopmask = strtol(e, NULL, 0);
3999 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4000 i++;
4001 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4002 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4003 cfg.after_is_words = 0;
4004 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4005 cfg.after_is_words = 1;
4006 } else {
4007 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4008 return ERROR_FAIL;
4012 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4013 if (res != ERROR_OK)
4014 return res;
4015 if (trace_status.stat & TRAXSTAT_TRACT) {
4016 LOG_WARNING("Silently stop active tracing!");
4017 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4018 if (res != ERROR_OK)
4019 return res;
4022 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4023 if (res != ERROR_OK)
4024 return res;
4026 xtensa->trace_active = true;
4027 command_print(CMD, "Trace started.");
4028 return ERROR_OK;
4031 COMMAND_HANDLER(xtensa_cmd_tracestart)
4033 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4034 target_to_xtensa(get_current_target(CMD_CTX)));
4037 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4039 struct xtensa_trace_status trace_status;
4041 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4042 if (res != ERROR_OK)
4043 return res;
4045 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4046 command_print(CMD, "No trace is currently active.");
4047 return ERROR_FAIL;
4050 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4051 if (res != ERROR_OK)
4052 return res;
4054 xtensa->trace_active = false;
4055 command_print(CMD, "Trace stop triggered.");
4056 return ERROR_OK;
4059 COMMAND_HANDLER(xtensa_cmd_tracestop)
4061 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4062 target_to_xtensa(get_current_target(CMD_CTX)));
4065 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4067 struct xtensa_trace_config trace_config;
4068 struct xtensa_trace_status trace_status;
4069 uint32_t memsz, wmem;
4071 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4072 if (res != ERROR_OK)
4073 return res;
4075 if (trace_status.stat & TRAXSTAT_TRACT) {
4076 command_print(CMD, "Tracing is still active. Please stop it first.");
4077 return ERROR_FAIL;
4080 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4081 if (res != ERROR_OK)
4082 return res;
4084 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4085 command_print(CMD, "No active trace found; nothing to dump.");
4086 return ERROR_FAIL;
4089 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4090 LOG_INFO("Total trace memory: %d words", memsz);
4091 if ((trace_config.addr &
4092 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
4093 /*Memory hasn't overwritten itself yet. */
4094 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4095 LOG_INFO("...but trace is only %d words", wmem);
4096 if (wmem < memsz)
4097 memsz = wmem;
4098 } else {
4099 if (trace_config.addr & TRAXADDR_TWSAT) {
4100 LOG_INFO("Real trace is many times longer than that (overflow)");
4101 } else {
4102 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4103 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4104 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
4108 uint8_t *tracemem = malloc(memsz * 4);
4109 if (!tracemem) {
4110 command_print(CMD, "Failed to alloc memory for trace data!");
4111 return ERROR_FAIL;
4113 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4114 if (res != ERROR_OK) {
4115 free(tracemem);
4116 return res;
4119 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4120 if (f <= 0) {
4121 free(tracemem);
4122 command_print(CMD, "Unable to open file %s", fname);
4123 return ERROR_FAIL;
4125 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4126 command_print(CMD, "Unable to write to file %s", fname);
4127 else
4128 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
4129 close(f);
4131 bool is_all_zeroes = true;
4132 for (unsigned int i = 0; i < memsz * 4; i++) {
4133 if (tracemem[i] != 0) {
4134 is_all_zeroes = false;
4135 break;
4138 free(tracemem);
4139 if (is_all_zeroes)
4140 command_print(
4141 CMD,
4142 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4144 return ERROR_OK;
4147 COMMAND_HANDLER(xtensa_cmd_tracedump)
4149 if (CMD_ARGC != 1) {
4150 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
4151 return ERROR_FAIL;
4154 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4155 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
4158 static const struct command_registration xtensa_any_command_handlers[] = {
4160 .name = "xtdef",
4161 .handler = xtensa_cmd_xtdef,
4162 .mode = COMMAND_CONFIG,
4163 .help = "Configure Xtensa core type",
4164 .usage = "<type>",
4167 .name = "xtopt",
4168 .handler = xtensa_cmd_xtopt,
4169 .mode = COMMAND_CONFIG,
4170 .help = "Configure Xtensa core option",
4171 .usage = "<name> <value>",
4174 .name = "xtmem",
4175 .handler = xtensa_cmd_xtmem,
4176 .mode = COMMAND_CONFIG,
4177 .help = "Configure Xtensa memory/cache option",
4178 .usage = "<type> [parameters]",
4181 .name = "xtmmu",
4182 .handler = xtensa_cmd_xtmmu,
4183 .mode = COMMAND_CONFIG,
4184 .help = "Configure Xtensa MMU option",
4185 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4188 .name = "xtmpu",
4189 .handler = xtensa_cmd_xtmpu,
4190 .mode = COMMAND_CONFIG,
4191 .help = "Configure Xtensa MPU option",
4192 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4195 .name = "xtreg",
4196 .handler = xtensa_cmd_xtreg,
4197 .mode = COMMAND_CONFIG,
4198 .help = "Configure Xtensa register",
4199 .usage = "<regname> <regnum>",
4202 .name = "xtregs",
4203 .handler = xtensa_cmd_xtreg,
4204 .mode = COMMAND_CONFIG,
4205 .help = "Configure number of Xtensa registers",
4206 .usage = "<numregs>",
4209 .name = "xtregfmt",
4210 .handler = xtensa_cmd_xtregfmt,
4211 .mode = COMMAND_CONFIG,
4212 .help = "Configure format of Xtensa register map",
4213 .usage = "<contiguous|sparse> [numgregs]",
4216 .name = "set_permissive",
4217 .handler = xtensa_cmd_permissive_mode,
4218 .mode = COMMAND_ANY,
4219 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4220 .usage = "[0|1]",
4223 .name = "maskisr",
4224 .handler = xtensa_cmd_mask_interrupts,
4225 .mode = COMMAND_ANY,
4226 .help = "mask Xtensa interrupts at step",
4227 .usage = "['on'|'off']",
4230 .name = "smpbreak",
4231 .handler = xtensa_cmd_smpbreak,
4232 .mode = COMMAND_ANY,
4233 .help = "Set the way the CPU chains OCD breaks",
4234 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4237 .name = "perfmon_enable",
4238 .handler = xtensa_cmd_perfmon_enable,
4239 .mode = COMMAND_EXEC,
4240 .help = "Enable and start performance counter",
4241 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4244 .name = "perfmon_dump",
4245 .handler = xtensa_cmd_perfmon_dump,
4246 .mode = COMMAND_EXEC,
4247 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4248 .usage = "[counter_id]",
4251 .name = "tracestart",
4252 .handler = xtensa_cmd_tracestart,
4253 .mode = COMMAND_EXEC,
4254 .help =
4255 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4256 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4259 .name = "tracestop",
4260 .handler = xtensa_cmd_tracestop,
4261 .mode = COMMAND_EXEC,
4262 .help = "Tracing: Stop current trace as started by the tracestart command",
4263 .usage = "",
4266 .name = "tracedump",
4267 .handler = xtensa_cmd_tracedump,
4268 .mode = COMMAND_EXEC,
4269 .help = "Tracing: Dump trace memory to a files. One file per core.",
4270 .usage = "<outfile>",
4273 .name = "exe",
4274 .handler = xtensa_cmd_exe,
4275 .mode = COMMAND_ANY,
4276 .help = "Xtensa stub execution",
4277 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4279 COMMAND_REGISTRATION_DONE
4282 const struct command_registration xtensa_command_handlers[] = {
4284 .name = "xtensa",
4285 .mode = COMMAND_ANY,
4286 .help = "Xtensa command group",
4287 .usage = "",
4288 .chain = xtensa_any_command_handlers,
4290 COMMAND_REGISTRATION_DONE