/*
 * Copyright(c) 2013-2016 Intel Corporation.
 *
 * Adrian Burns (adrian.burns@intel.com)
 * Thomas Faust (thomas.faust@intel.com)
 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
 * Julien Carreno (julien.carreno@intel.com)
 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
 * Jessica Gomez (jessica.gomez.hernandez@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Contact Information:
 * Intel Corporation
 */

/*
 * @file
 * This implements the probemode operations for Lakemont 1 (LMT1).
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/log.h>

#include "target.h"
#include "target_type.h"
#include "lakemont.h"
#include "register.h"
#include "breakpoints.h"
#include "x86_32_common.h"
static int irscan(struct target *t, uint8_t *out,
		uint8_t *in, uint8_t ir_len);
static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len);
static int save_context(struct target *target);
static int restore_context(struct target *target);
static uint32_t get_tapstatus(struct target *t);
static int enter_probemode(struct target *t);
static int exit_probemode(struct target *t);
static int halt_prep(struct target *t);
static int do_halt(struct target *t);
static int do_resume(struct target *t);
static int read_all_core_hw_regs(struct target *t);
static int write_all_core_hw_regs(struct target *t);
static int read_hw_reg(struct target *t,
		int reg, uint32_t *regval, uint8_t cache);
static int write_hw_reg(struct target *t,
		int reg, uint32_t regval, uint8_t cache);
static struct reg_cache *lakemont_build_reg_cache
		(struct target *target);
static int submit_reg_pir(struct target *t, int num);
static int submit_instruction_pir(struct target *t, int num);
static int submit_pir(struct target *t, uint64_t op);
static int lakemont_get_core_reg(struct reg *reg);
static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf);
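
/* A single statically allocated scan buffer is shared by every IR/DR shift
 * in this driver; irscan()/drscan() and the PIR helpers below all stage
 * their scan fields through it, so scans are serialized by design.
 */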
static struct scan_blk scan;

/* registers and opcodes for register access, pm_idx is used to identify the
 * registers that are modified for lakemont probemode specific operations
 */
static const struct {
	uint8_t id;
	const char *name;
	uint64_t op;
	uint8_t pm_idx;
	unsigned bits;
	enum reg_type type;
	const char *group;
	const char *feature;
} regs[] = {
	/* general purpose registers */
	{ EAX, "eax", 0x000000D01D660000, 0, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ECX, "ecx", 0x000000501D660000, 1, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDX, "edx", 0x000000901D660000, 2, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EBX, "ebx", 0x000000101D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ESP, "esp", 0x000000E01D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EBP, "ebp", 0x000000601D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ ESI, "esi", 0x000000A01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDI, "edi", 0x000000201D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },

	/* instruction pointer & flags */
	{ EIP, "eip", 0x000000C01D660000, 3, 32, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EFLAGS, "eflags", 0x000000401D660000, 4, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },

	/* segment registers */
	{ CS, "cs", 0x000000281D660000, 5, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ SS, "ss", 0x000000C81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ DS, "ds", 0x000000481D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ES, "es", 0x000000A81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FS, "fs", 0x000000881D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ GS, "gs", 0x000000081D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },

	/* floating point unit registers - not accessible via JTAG - here to satisfy GDB */
	{ ST0, "st0", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST1, "st1", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST2, "st2", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST3, "st3", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST4, "st4", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST5, "st5", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST6, "st6", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST7, "st7", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FCTRL, "fctrl", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FSTAT, "fstat", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FTAG, "ftag", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FISEG, "fiseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FIOFF, "fioff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOSEG, "foseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOOFF, "fooff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOP, "fop", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },

	/* control registers */
	{ CR0, "cr0", 0x000000001D660000, 6, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR2, "cr2", 0x000000BC1D660000, 7, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR3, "cr3", 0x000000801D660000, 8, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR4, "cr4", 0x0000002C1D660000, 9, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },

	/* debug registers */
	{ DR0, "dr0", 0x0000007C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR1, "dr1", 0x000000FC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR2, "dr2", 0x000000021D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR3, "dr3", 0x000000821D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR6, "dr6", 0x000000301D660000, 10, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR7, "dr7", 0x000000B01D660000, 11, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },

	/* descriptor tables */
	{ IDTB, "idtbase", 0x000000581D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTL, "idtlimit", 0x000000D81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTAR, "idtar", 0x000000981D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTB, "gdtbase", 0x000000B81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTL, "gdtlimit", 0x000000781D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTAR, "gdtar", 0x000000381D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TR, "tr", 0x000000701D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTR, "ldtr", 0x000000F01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTB, "ldbase", 0x000000041D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTL, "ldlimit", 0x000000841D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTAR, "ldtar", 0x000000F81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },

	/* segment registers */
	{ CSB, "csbase", 0x000000F41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSL, "cslimit", 0x0000000C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSAR, "csar", 0x000000741D660000, 12, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSB, "dsbase", 0x000000941D660000, 13, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSL, "dslimit", 0x000000541D660000, 14, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSAR, "dsar", 0x000000141D660000, 15, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESB, "esbase", 0x0000004C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESL, "eslimit", 0x000000CC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESAR, "esar", 0x0000008C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSB, "fsbase", 0x000000641D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSL, "fslimit", 0x000000E41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSAR, "fsar", 0x000000A41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSB, "gsbase", 0x000000C41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSL, "gslimit", 0x000000241D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSAR, "gsar", 0x000000441D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSB, "ssbase", 0x000000341D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSL, "sslimit", 0x000000B41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSAR, "ssar", 0x000000D41D660000, 16, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSB, "tssbase", 0x000000E81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSL, "tsslimit", 0x000000181D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSAR, "tssar", 0x000000681D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* probemode control register */
	{ PMCR, "pmcr", 0x000000421D660000, 17, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
};
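
/* Note: 'op' above is the 64-bit probe-mode (PIR) opcode shifted in through
 * submit_reg_pir() to select a register's slot in the core shadow SRAM, and
 * 'pm_idx' picks the entry in x86_32->pm_regs[] (via the I() helper used in
 * halt_prep()) for registers the driver rewrites while the core is halted.
 */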

static const struct {
	uint8_t id;
	const char *name;
	uint64_t op;
} instructions[] = {
	/* memory read/write */
	{ MEMRDB32, "MEMRDB32", 0x0909090909090851 },
	{ MEMRDB16, "MEMRDB16", 0x09090909090851E6 },
	{ MEMRDH32, "MEMRDH32", 0x090909090908D166 },
	{ MEMRDH16, "MEMRDH16", 0x090909090908D1E6 },
	{ MEMRDW32, "MEMRDW32", 0x09090909090908D1 },
	{ MEMRDW16, "MEMRDW16", 0x0909090908D1E666 },
	{ MEMWRB32, "MEMWRB32", 0x0909090909090811 },
	{ MEMWRB16, "MEMWRB16", 0x09090909090811E6 },
	{ MEMWRH32, "MEMWRH32", 0x0909090909089166 },
	{ MEMWRH16, "MEMWRH16", 0x09090909090891E6 },
	{ MEMWRW32, "MEMWRW32", 0x0909090909090891 },
	{ MEMWRW16, "MEMWRW16", 0x090909090891E666 },
	/* IO read/write */
	{ IORDB32, "IORDB32", 0x0909090909090937 },
	{ IORDB16, "IORDB16", 0x09090909090937E6 },
	{ IORDH32, "IORDH32", 0x090909090909B766 },
	{ IORDH16, "IORDH16", 0x090909090909B7E6 },
	{ IORDW32, "IORDW32", 0x09090909090909B7 },
	{ IORDW16, "IORDW16", 0x0909090909B7E666 },
	{ IOWRB32, "IOWRB32", 0x0909090909090977 },
	{ IOWRB16, "IOWRB16", 0x09090909090977E6 },
	{ IOWRH32, "IOWRH32", 0x090909090909F766 },
	{ IOWRH16, "IOWRH16", 0x090909090909F7E6 },
	{ IOWRW32, "IOWRW32", 0x09090909090909F7 },
	{ IOWRW16, "IOWRW16", 0x0909090909F7E666 },
	/* lakemont1 core shadow ram access opcodes */
	{ SRAMACCESS, "SRAMACCESS", 0x0000000E9D660000 },
	{ SRAM2PDR, "SRAM2PDR", 0x4CF0000000000000 },
	{ PDR2SRAM, "PDR2SRAM", 0x0CF0000000000000 },
	{ WBINVD, "WBINVD", 0x09090909090990F0 },
};
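
/* These canned opcode sequences are pushed into the PIR with
 * submit_instruction_pir(); the memory and I/O variants are exposed to the
 * generic x86_32 code through x86_32->submit_instruction (see
 * lakemont_init_arch_info() below).
 */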

bool check_not_halted(const struct target *t)
{
	bool halted = t->state == TARGET_HALTED;
	if (!halted)
		LOG_ERROR("target running, halt it first");
	return !halted;
}

static int irscan(struct target *t, uint8_t *out,
		uint8_t *in, uint8_t ir_len)
{
	int retval = ERROR_OK;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (NULL == t->tap) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s invalid target tap", __func__);
		return retval;
	}
	if (ir_len != t->tap->ir_length) {
		retval = ERROR_FAIL;
		if (t->tap->enabled)
			LOG_ERROR("%s tap enabled but tap irlen=%d",
					__func__, t->tap->ir_length);
		else
			LOG_ERROR("%s tap not enabled and irlen=%d",
					__func__, t->tap->ir_length);
		return retval;
	}
	struct scan_field *fields = &scan.field;
	fields->num_bits = ir_len;
	fields->out_value = out;
	fields->in_value = in;
	jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
	if (x86_32->flush) {
		retval = jtag_execute_queue();
		if (retval != ERROR_OK)
			LOG_ERROR("%s failed to execute queue", __func__);
	}
	return retval;
}

static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
{
	int retval = ERROR_OK;
	uint64_t data = 0;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (NULL == t->tap) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s invalid target tap", __func__);
		return retval;
	}
	if (len > MAX_SCAN_SIZE || 0 == len) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s data len is %d bits, max is %d bits",
				__func__, len, MAX_SCAN_SIZE);
		return retval;
	}
	struct scan_field *fields = &scan.field;
	fields->out_value = out;
	fields->in_value = in;
	fields->num_bits = len;
	jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
	if (x86_32->flush) {
		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("%s drscan failed to execute queue", __func__);
			return retval;
		}
	}
	if (in != NULL) {
		if (len >= 8) {
			for (int n = (len / 8) - 1 ; n >= 0; n--)
				data = (data << 8) + *(in+n);
		} else
			LOG_DEBUG("dr in 0x%02" PRIx8, *in);
	} else {
		LOG_ERROR("%s no drscan data", __func__);
		retval = ERROR_FAIL;
	}
	return retval;
}

static int save_context(struct target *t)
{
	int err;
	/* read core registers from lakemont sram */
	err = read_all_core_hw_regs(t);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error reading regs", __func__);
		return err;
	}
	return ERROR_OK;
}

static int restore_context(struct target *t)
{
	int err = ERROR_OK;
	uint32_t i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	/* write core regs into the core PM SRAM from the reg_cache */
	err = write_all_core_hw_regs(t);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error writing regs", __func__);
		return err;
	}

	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		x86_32->cache->reg_list[i].dirty = false;
		x86_32->cache->reg_list[i].valid = false;
	}
	return err;
}

/*
 * We keep reg_cache in sync with hardware at halt/resume time; we avoid
 * writing to real hardware here because pm_regs reflects the hardware
 * while we are halted, then reg_cache syncs with hw on resume.
 * TODO - for "reg eip force" to work it is assumed that get/set read from
 * and write to hardware; there may be other reasons too, because other
 * openocd targets generally read/write from hardware in get/set - watch this!
 */
static int lakemont_get_core_reg(struct reg *reg)
{
	int retval = ERROR_OK;
	struct lakemont_core_reg *lakemont_reg = reg->arch_info;
	struct target *t = lakemont_reg->target;
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	LOG_DEBUG("reg=%s, value=0x%08" PRIx32, reg->name,
			buf_get_u32(reg->value, 0, 32));
	return retval;
}

static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct lakemont_core_reg *lakemont_reg = reg->arch_info;
	struct target *t = lakemont_reg->target;
	uint32_t value = buf_get_u32(buf, 0, 32);
	LOG_DEBUG("reg=%s, newval=0x%08" PRIx32, reg->name, value);
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	buf_set_u32(reg->value, 0, 32, value);
	reg->dirty = true;
	reg->valid = true;
	return ERROR_OK;
}

static const struct reg_arch_type lakemont_reg_type = {
	/* these get called if reg_cache doesn't have a "valid" value
	 * of an individual reg eg "reg eip" but not for "reg" block
	 */
	.get = lakemont_get_core_reg,
	.set = lakemont_set_core_reg,
};

struct reg_cache *lakemont_build_reg_cache(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	int num_regs = ARRAY_SIZE(regs);
	struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
	struct reg_feature *feature;
	int i;

	if (cache == NULL || reg_list == NULL || arch_info == NULL) {
		free(cache);
		free(reg_list);
		free(arch_info);
		LOG_ERROR("%s out of memory", __func__);
		return NULL;
	}

	/* Build the process context cache */
	cache->name = "lakemont registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = num_regs;
	(*cache_p) = cache;
	x86_32->cache = cache;

	for (i = 0; i < num_regs; i++) {
		arch_info[i].target = t;
		arch_info[i].x86_32_common = x86_32;
		arch_info[i].op = regs[i].op;
		arch_info[i].pm_idx = regs[i].pm_idx;
		reg_list[i].name = regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].dirty = false;
		reg_list[i].valid = false;
		reg_list[i].type = &lakemont_reg_type;
		reg_list[i].arch_info = &arch_info[i];

		reg_list[i].group = regs[i].group;
		reg_list[i].number = i;
		reg_list[i].exist = true;
		reg_list[i].caller_save = true;	/* gdb defaults to true */

		feature = calloc(1, sizeof(struct reg_feature));
		if (feature) {
			feature->name = regs[i].feature;
			reg_list[i].feature = feature;
		} else
			LOG_ERROR("%s unable to allocate feature list", __func__);

		reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
		if (reg_list[i].reg_data_type)
			reg_list[i].reg_data_type->type = regs[i].type;
		else
			LOG_ERROR("%s unable to allocate reg type list", __func__);
	}
	return cache;
}

static uint32_t get_tapstatus(struct target *t)
{
	scan.out[0] = TAPSTATUS;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return 0;
	if (drscan(t, NULL, scan.out, TS_SIZE) != ERROR_OK)
		return 0;
	return buf_get_u32(scan.out, 0, 32);
}

static int enter_probemode(struct target *t)
{
	uint32_t tapstatus = 0;
	int retries = 100;

	tapstatus = get_tapstatus(t);
	LOG_DEBUG("TS before PM enter = 0x%08" PRIx32, tapstatus);
	if (tapstatus & TS_PM_BIT) {
		LOG_DEBUG("core already in probemode");
		return ERROR_OK;
	}
	scan.out[0] = PROBEMODE;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = 1;
	if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
		return ERROR_FAIL;

	while (retries--) {
		tapstatus = get_tapstatus(t);
		LOG_DEBUG("TS after PM enter = 0x%08" PRIx32, tapstatus);
		if ((tapstatus & TS_PM_BIT) && (!(tapstatus & TS_EN_PM_BIT)))
			return ERROR_OK;
	}

	LOG_ERROR("%s PM enter error, tapstatus = 0x%08" PRIx32
			, __func__, tapstatus);
	return ERROR_FAIL;
}

static int exit_probemode(struct target *t)
{
	uint32_t tapstatus = get_tapstatus(t);
	LOG_DEBUG("TS before PM exit = 0x%08" PRIx32, tapstatus);

	if (!(tapstatus & TS_PM_BIT)) {
		LOG_USER("core not in PM");
		return ERROR_OK;
	}
	scan.out[0] = PROBEMODE;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = 0;
	if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
		return ERROR_FAIL;
	return ERROR_OK;
}

/* do what's needed to properly enter probemode for debug on lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);
	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush them, depending on the core version */
	if (!(x86_32->core_type == LMT3_5) && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
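
/* Halt flow: do_halt() below switches the core into probemode, then
 * lakemont_update_after_probemode_entry() saves the register context and
 * runs halt_prep() above to give the debugger a well-defined environment
 * (CS/DS base and limit, DR7, EFLAGS, CPL and caching as set up there).
 */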

static int do_halt(struct target *t)
{
	/* needs proper handling later if doing a halt errors out */
	t->state = TARGET_DEBUG_RUNNING;
	if (enter_probemode(t) != ERROR_OK)
		return ERROR_FAIL;

	return lakemont_update_after_probemode_entry(t);
}

/* we need to expose the update to be able to complete the reset at SoC level */
int lakemont_update_after_probemode_entry(struct target *t)
{
	if (save_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (halt_prep(t) != ERROR_OK)
		return ERROR_FAIL;
	t->state = TARGET_HALTED;

	return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
}

static int do_resume(struct target *t)
{
	/* needs proper handling later */
	t->state = TARGET_DEBUG_RUNNING;
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;
	t->state = TARGET_RUNNING;

	t->debug_reason = DBG_REASON_NOTHALTED;
	LOG_USER("target running");

	return target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
}

static int read_all_core_hw_regs(struct target *t)
{
	int err;
	uint32_t regval;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		err = read_hw_reg(t, regs[i].id, &regval, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error saving reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("read_all_core_hw_regs read %u registers ok", i);
	return ERROR_OK;
}

static int write_all_core_hw_regs(struct target *t)
{
	int err;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		err = write_hw_reg(t, i, 0, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error restoring reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
	return ERROR_OK;
}

/* read reg from lakemont core shadow ram, update reg cache if needed */
static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;

	jtag_add_sleep(DELAY_SUBMITPIR);
	*regval = buf_get_u32(scan.out, 0, 32);
	if (cache) {
		buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
		x86_32->cache->reg_list[reg].valid = true;
		x86_32->cache->reg_list[reg].dirty = false;
	}
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			*regval);
	return ERROR_OK;
}

/* write lakemont core shadow ram reg, update reg cache if needed */
static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;

	uint8_t reg_buf[4];
	if (cache)
		regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
	buf_set_u32(reg_buf, 0, 32, regval);
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			regval);

	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
		return ERROR_FAIL;

	/* we are writing from the cache so ensure we reset flags */
	if (cache) {
		x86_32->cache->reg_list[reg].dirty = false;
		x86_32->cache->reg_list[reg].valid = false;
	}
	return ERROR_OK;
}
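
/* The paging helpers below work on the cached CR0 image in pm_regs and push
 * changes back through write_hw_reg(); they are plugged into the generic
 * x86_32 code by lakemont_init_arch_info(), since the CR0.PG setting
 * influences how memory reads/writes are carried out.
 */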

static bool is_paging_enabled(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (x86_32->pm_regs[I(CR0)] & CR0_PG)
		return true;
	else
		return false;
}

static uint8_t get_num_user_regs(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	return x86_32->cache->num_regs;
}

/* value of the CR0.PG (paging enabled) bit influences memory reads/writes */
static int disable_paging(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	x86_32->pm_regs[I(CR0)] = x86_32->pm_regs[I(CR0)] & ~CR0_PG;
	int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error disabling paging", __func__);
		return err;
	}
	return err;
}

static int enable_paging(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
	int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error enabling paging", __func__);
		return err;
	}
	return err;
}

static bool sw_bpts_supported(struct target *t)
{
	uint32_t tapstatus = get_tapstatus(t);
	if (tapstatus & TS_SBP_BIT)
		return true;
	else
		return false;
}

static int transaction_status(struct target *t)
{
	uint32_t tapstatus = get_tapstatus(t);
	if ((TS_EN_PM_BIT | TS_PRDY_BIT) & tapstatus) {
		LOG_ERROR("%s transaction error tapstatus = 0x%08" PRIx32
				, __func__, tapstatus);
		return ERROR_FAIL;
	} else {
		return ERROR_OK;
	}
}

static int submit_instruction(struct target *t, int num)
{
	int err = submit_instruction_pir(t, num);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error submitting pir", __func__);
		return err;
	}
	return err;
}

static int submit_reg_pir(struct target *t, int num)
{
	LOG_DEBUG("reg %s op=0x%016" PRIx64, regs[num].name, regs[num].op);
	int err = submit_pir(t, regs[num].op);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error submitting pir", __func__);
		return err;
	}
	return err;
}

static int submit_instruction_pir(struct target *t, int num)
{
	LOG_DEBUG("%s op=0x%016" PRIx64, instructions[num].name,
			instructions[num].op);
	int err = submit_pir(t, instructions[num].op);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error submitting pir", __func__);
		return err;
	}
	return err;
}

/*
 * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
 * command; there is no corresponding data register
 */
static int submit_pir(struct target *t, uint64_t op)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	uint8_t op_buf[8];
	buf_set_u64(op_buf, 0, 64, op);
	int flush = x86_32->flush;
	x86_32->flush = 0;
	scan.out[0] = WRPIR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = SUBMITPIR;
	x86_32->flush = flush;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	jtag_add_sleep(DELAY_SUBMITPIR);
	return ERROR_OK;
}
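
/* A register access is therefore a short PIR sequence: submit the register
 * opcode, then SRAMACCESS followed by SRAM2PDR (read) or PDR2SRAM (write),
 * and move the 32-bit value through the PDR with the RDWRPDR IR/DR scan;
 * see read_hw_reg() and write_hw_reg() above.
 */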

int lakemont_init_target(struct command_context *cmd_ctx, struct target *t)
{
	lakemont_build_reg_cache(t);
	t->state = TARGET_RUNNING;
	t->debug_reason = DBG_REASON_NOTHALTED;
	return ERROR_OK;
}

int lakemont_init_arch_info(struct target *t, struct x86_32_common *x86_32)
{
	x86_32->submit_instruction = submit_instruction;
	x86_32->transaction_status = transaction_status;
	x86_32->read_hw_reg = read_hw_reg;
	x86_32->write_hw_reg = write_hw_reg;
	x86_32->sw_bpts_supported = sw_bpts_supported;
	x86_32->get_num_user_regs = get_num_user_regs;
	x86_32->is_paging_enabled = is_paging_enabled;
	x86_32->disable_paging = disable_paging;
	x86_32->enable_paging = enable_paging;
	return ERROR_OK;
}
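
/* Once PMCR redirection is armed, a breakpoint, watchpoint or single step
 * drops the core into probemode; lakemont_poll() below notices this through
 * TS_PM_BIT/TS_PMCR_BIT, saves the context and reports a halted target with
 * a decoded debug reason.
 */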

int lakemont_poll(struct target *t)
{
	/* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
	 * single stepping and shutdowns to be redirected to PM but does not allow
	 * redirecting into PM as a result of SMM enter and SMM exit
	 */
	uint32_t ts = get_tapstatus(t);

	if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
		/* something is wrong here */
		LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
		/* TODO: Give a hint that unlocking is wrong or maybe a
		 * 'jtag arp_init' helps
		 */
		t->state = TARGET_DEBUG_RUNNING;
		return ERROR_OK;
	}

	if (t->state == TARGET_HALTED && (!(ts & TS_PM_BIT))) {
		LOG_INFO("target running for unknown reason");
		t->state = TARGET_RUNNING;
	}

	if (t->state == TARGET_RUNNING &&
		t->state != TARGET_DEBUG_RUNNING) {

		if ((ts & TS_PM_BIT) && (ts & TS_PMCR_BIT)) {

			LOG_DEBUG("redirect to PM, tapstatus=0x%08" PRIx32, get_tapstatus(t));

			t->state = TARGET_DEBUG_RUNNING;
			if (save_context(t) != ERROR_OK)
				return ERROR_FAIL;
			if (halt_prep(t) != ERROR_OK)
				return ERROR_FAIL;
			t->state = TARGET_HALTED;
			t->debug_reason = DBG_REASON_UNDEFINED;

			struct x86_32_common *x86_32 = target_to_x86_32(t);
			uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
			uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
			uint32_t hwbreakpoint = (uint32_t)-1;

			if (dr6 & DR6_BRKDETECT_0)
				hwbreakpoint = 0;
			if (dr6 & DR6_BRKDETECT_1)
				hwbreakpoint = 1;
			if (dr6 & DR6_BRKDETECT_2)
				hwbreakpoint = 2;
			if (dr6 & DR6_BRKDETECT_3)
				hwbreakpoint = 3;

			if (hwbreakpoint != (uint32_t)-1) {
				uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
				uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
				if (type == DR7_BP_EXECUTE) {
					LOG_USER("hit hardware breakpoint (hwreg=%" PRIu32 ") at 0x%08" PRIx32, hwbreakpoint, eip);
				} else {
					uint32_t address = 0;
					switch (hwbreakpoint) {
					default:
					case 0:
						address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
						break;
					case 1:
						address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
						break;
					case 2:
						address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
						break;
					case 3:
						address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
						break;
					}
					LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
							type == DR7_BP_WRITE ? "write" : "access", address,
							hwbreakpoint, eip);
				}
				t->debug_reason = DBG_REASON_BREAKPOINT;
			} else {
				/* Check if the target hit a software breakpoint.
				 * ! Watch out: EIP is currently pointing after the breakpoint opcode
				 */
				struct breakpoint *bp = NULL;
				bp = breakpoint_find(t, eip-1);
				if (bp != NULL) {
					t->debug_reason = DBG_REASON_BREAKPOINT;
					if (bp->type == BKPT_SOFT) {
						/* The EIP is now pointing to the next byte after the
						 * breakpoint instruction. This needs to be corrected.
						 */
						buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
						x86_32->cache->reg_list[EIP].dirty = true;
						x86_32->cache->reg_list[EIP].valid = true;
						LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
					} else {
						/* it's not a hardware breakpoint (checked already in DR6 state)
						 * and it's also not a software breakpoint ...
						 */
						LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
					}
				} else {

					/* There is also the case that we hit a breakpoint instruction
					 * which was not set by us. This needs to be handled by the
					 * application that introduced the breakpoint.
					 */

					LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
				}
			}

			return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
		}
	}

	return ERROR_OK;
}

int lakemont_arch_state(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
			debug_reason_name(t),
			buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
			(buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");

	return ERROR_OK;
}

int lakemont_halt(struct target *t)
{
	if (t->state == TARGET_RUNNING) {
		t->debug_reason = DBG_REASON_DBGRQ;
		if (do_halt(t) != ERROR_OK)
			return ERROR_FAIL;
		return ERROR_OK;
	} else {
		LOG_ERROR("%s target not running", __func__);
		return ERROR_FAIL;
	}
}

int lakemont_resume(struct target *t, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	struct breakpoint *bp = NULL;
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* TODO lakemont_enable_breakpoints(t); */
	if (t->state == TARGET_HALTED) {

		/* resuming from a software breakpoint needs some special handling */
		uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
		bp = breakpoint_find(t, eip);
		if (bp != NULL /*&& bp->type == BKPT_SOFT*/) {
			/* the step will step over the breakpoint */
			if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
				LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
						"failed to resume the target", __func__, eip);
				return ERROR_FAIL;
			}
		}

		/* if breakpoints are enabled, we need to redirect these into probe mode */
		struct breakpoint *activeswbp = t->breakpoints;
		while (activeswbp != NULL && activeswbp->set == 0)
			activeswbp = activeswbp->next;
		struct watchpoint *activehwbp = t->watchpoints;
		while (activehwbp != NULL && activehwbp->set == 0)
			activehwbp = activehwbp->next;
		if (activeswbp != NULL || activehwbp != NULL)
			buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
		if (do_resume(t) != ERROR_OK)
			return ERROR_FAIL;
	} else {
		LOG_USER("target not halted");
		return ERROR_FAIL;
	}
	return ERROR_OK;
}

int lakemont_step(struct target *t, int current,
		target_addr_t address, int handle_breakpoints)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
	uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
	struct breakpoint *bp = NULL;
	int retval = ERROR_OK;
	uint32_t tapstatus = 0;

	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	bp = breakpoint_find(t, eip);
	if (retval == ERROR_OK && bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_remove_breakpoint(t, bp);
	}

	/* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
	LOG_DEBUG("modifying PMCR = 0x%08" PRIx32 " and EFLAGS = 0x%08" PRIx32, pmcr, eflags);
	eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
	buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
	buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
	LOG_DEBUG("EFLAGS [TF] [RF] bits set=0x%08" PRIx32 ", PMCR=0x%08" PRIx32 ", EIP=0x%08" PRIx32,
			eflags, pmcr, eip);

	tapstatus = get_tapstatus(t);

	t->debug_reason = DBG_REASON_SINGLESTEP;
	t->state = TARGET_DEBUG_RUNNING;
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;

	target_call_event_callbacks(t, TARGET_EVENT_RESUMED);

	tapstatus = get_tapstatus(t);
	if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
		/* target has stopped */
		if (save_context(t) != ERROR_OK)
			return ERROR_FAIL;
		if (halt_prep(t) != ERROR_OK)
			return ERROR_FAIL;
		t->state = TARGET_HALTED;

		LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
				buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
		target_call_event_callbacks(t, TARGET_EVENT_HALTED);
	} else {
		/* target didn't stop
		 * I hope the poll() will catch it, but the deleted breakpoint is gone
		 */
		LOG_ERROR("%s target didn't stop after executing a single step", __func__);
		t->state = TARGET_RUNNING;
		return ERROR_FAIL;
	}

	/* try to re-apply the breakpoint, even if the step failed
	 * TODO: When a bp was set, we should try to stop the target - fix the return above
	 */
	if (bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_add_breakpoint(t, bp);
	}

	return retval;
}

static int lakemont_reset_break(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct jtag_tap *saved_tap = x86_32->curr_tap;
	struct scan_field *fields = &scan.field;

	int retval = ERROR_OK;

	LOG_DEBUG("issuing port 0xcf9 reset");

	/* prepare resetbreak setting the proper bits in CLTAPC_CPU_VPREQ */
	x86_32->curr_tap = jtag_tap_by_position(1);
	if (x86_32->curr_tap == NULL) {
		x86_32->curr_tap = saved_tap;
		LOG_ERROR("%s could not select quark_x10xx.cltap", __func__);
		return ERROR_FAIL;
	}

	fields->in_value = NULL;
	fields->num_bits = 8;

	/* select CLTAPC_CPU_VPREQ instruction */
	scan.out[0] = 0x51;
	fields->out_value = ((uint8_t *)scan.out);
	jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		x86_32->curr_tap = saved_tap;
		LOG_ERROR("%s irscan failed to execute queue", __func__);
		return retval;
	}

	/* set enable_preq_on_reset & enable_preq_on_reset2 bits */
	scan.out[0] = 0x06;
	fields->out_value = ((uint8_t *)scan.out);
	jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("%s drscan failed to execute queue", __func__);
		x86_32->curr_tap = saved_tap;
		return retval;
	}

	/* restore current tap */
	x86_32->curr_tap = saved_tap;

	return ERROR_OK;
}

/*
 * If we ever get an adapter with support for PREQ# and PRDY#, we should
 * update this function to add support for using those two signals.
 *
 * Meanwhile, we're assuming that we only support reset break.
 */
int lakemont_reset_assert(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/* write 0x6 to I/O port 0xcf9 to cause the reset */
	uint8_t cf9_reset_val = 0x6;
	int retval;

	LOG_DEBUG(" ");

	if (t->state != TARGET_HALTED) {
		LOG_DEBUG("target must be halted first");
		retval = lakemont_halt(t);
		if (retval != ERROR_OK) {
			LOG_ERROR("could not halt target");
			return retval;
		}
		x86_32->forced_halt_for_reset = true;
	}

	if (t->reset_halt) {
		retval = lakemont_reset_break(t);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = x86_32_common_write_io(t, 0xcf9, BYTE, &cf9_reset_val);
	if (retval != ERROR_OK) {
		LOG_ERROR("could not write to port 0xcf9");
		return retval;
	}

	if (!t->reset_halt && x86_32->forced_halt_for_reset) {
		x86_32->forced_halt_for_reset = false;
		retval = lakemont_resume(t, true, 0x00, false, true);
		if (retval != ERROR_OK)
			return retval;
	}

	/* remove breakpoints and watchpoints */
	x86_32_common_reset_breakpoints_watchpoints(t);

	return ERROR_OK;
}

int lakemont_reset_deassert(struct target *t)
{
	int retval;

	LOG_DEBUG(" ");

	if (target_was_examined(t)) {
		retval = lakemont_poll(t);
		if (retval != ERROR_OK)
			return retval;
	}

	if (t->reset_halt) {
		/* entered PM after reset, update the state */
		retval = lakemont_update_after_probemode_entry(t);
		if (retval != ERROR_OK) {
			LOG_ERROR("could not update state after probemode entry");
			return retval;
		}

		if (t->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(t));
			if (target_was_examined(t)) {
				retval = target_halt(t);
				if (retval != ERROR_OK)
					return retval;
			} else {
				t->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}