/*
 * x86 gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
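
/*
 * gpr_map converts GDB's amd64 register order (rax, rbx, rcx, rdx, rsi,
 * rdi, rbp, rsp, then r8..r15) into env->regs[] indices; GDB's i386 order
 * already matches env->regs[] directly, so gpr_map32 is the identity map.
 */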

/*
 * Keep these in sync with assignment to
 * gdb_num_core_regs in target/i386/cpu.c
 * and with the machine description
 */

/*
 * SEG: 6 segments, plus fs_base, gs_base, kernel_gs_base
 */

/*
 * general regs -----> 8 or 16
 */
#define IDX_NB_IP       1
#define IDX_NB_FLAGS    1
#define IDX_NB_SEG      (6 + 3)
#define IDX_NB_CTL      6
#define IDX_NB_FP       16
/*
 * fpu regs ----------> 8 or 16
 */
#define IDX_NB_MXCSR    1
/*
 *          total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
 */
#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + IDX_NB_IP)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + IDX_NB_FLAGS)
#define IDX_CTL_REGS    (IDX_SEG_REGS + IDX_NB_SEG)
#define IDX_FP_REGS     (IDX_CTL_REGS + IDX_NB_CTL)
#define IDX_XMM_REGS    (IDX_FP_REGS + IDX_NB_FP)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

#define IDX_CTL_CR0_REG     (IDX_CTL_REGS + 0)
#define IDX_CTL_CR2_REG     (IDX_CTL_REGS + 1)
#define IDX_CTL_CR3_REG     (IDX_CTL_REGS + 2)
#define IDX_CTL_CR4_REG     (IDX_CTL_REGS + 3)
#define IDX_CTL_CR8_REG     (IDX_CTL_REGS + 4)
#define IDX_CTL_EFER_REG    (IDX_CTL_REGS + 5)
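
/*
 * Illustrative sanity check (an addition, assuming QEMU_BUILD_BUG_ON from
 * "qemu/compiler.h", pulled in via osdep.h): every block except the general
 * and XMM registers has a fixed width (1 + 1 + 9 + 6 + 16 + 1 == 34), so the
 * totals quoted above reduce to 2 * CPU_NB_REGS + 34, i.e. 50 or 66.
 */
QEMU_BUILD_BUG_ON(IDX_MXCSR_REG + IDX_NB_MXCSR != 2 * CPU_NB_REGS + 34);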

#ifdef TARGET_X86_64
#define GDB_FORCE_64 1
#else
#define GDB_FORCE_64 0
#endif
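
/*
 * GDB cannot cope with a register changing size mid-session, so when the
 * target is 64-bit capable the wide registers below are always exposed at
 * 64-bit width, even while the CPU is executing 32-bit code.
 */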

int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t tpr;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
            } else if (n < CPU_NB_REGS32) {
                return gdb_get_reg64(mem_buf,
                                     env->regs[gpr_map[n]] & 0xffffffffUL);
            } else {
                memset(mem_buf, 0, sizeof(target_ulong));
                return sizeof(target_ulong);
            }
        } else {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    return gdb_get_reg64(mem_buf, env->eip);
                } else {
                    return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
                }
            } else {
                return gdb_get_reg32(mem_buf, env->eip);
            }
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);

        case IDX_SEG_REGS + 6:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_FS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_FS].base);

        case IDX_SEG_REGS + 7:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_GS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_GS].base);

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->kernelgsbase);
            }
            return gdb_get_reg32(mem_buf, env->kernelgsbase);
#endif
            return gdb_get_reg32(mem_buf, 0);

        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            /* FSW: QEMU keeps the TOP field (bits 11..13) in env->fpstt. */
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            return gdb_get_reg32(mem_buf, env->mxcsr);

        case IDX_CTL_CR0_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[0]);
            }
            return gdb_get_reg32(mem_buf, env->cr[0]);

        case IDX_CTL_CR2_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[2]);
            }
            return gdb_get_reg32(mem_buf, env->cr[2]);

        case IDX_CTL_CR3_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[3]);
            }
            return gdb_get_reg32(mem_buf, env->cr[3]);

        case IDX_CTL_CR4_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[4]);
            }
            return gdb_get_reg32(mem_buf, env->cr[4]);

        case IDX_CTL_CR8_REG:
#ifdef CONFIG_SOFTMMU
            tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
            tpr = 0;
#endif
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, tpr);
            }
            return gdb_get_reg32(mem_buf, tpr);

        case IDX_CTL_EFER_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->efer);
            }
            return gdb_get_reg32(mem_buf, env->efer);
        }
    }
    return 0;
}

static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
{
    CPUX86State *env = &cpu->env;
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            /* Real or vm86 mode: synthesise a writable data segment. */
            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
            base = selector << 4;
            limit = 0xffff;
            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
        } else {
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
                                         &flags)) {
                return 4;
            }
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}

int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t tmp;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            } else if (n < CPU_NB_REGS32) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
            }
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    env->eip = ldq_p(mem_buf);
                } else {
                    env->eip = ldq_p(mem_buf) & 0xffffffffUL;
                }
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);

        case IDX_SEG_REGS + 6:
#ifdef TARGET_X86_64
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_FS].base = ldq_p(mem_buf);
                return 8;
            }
#endif
            env->segs[R_FS].base = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS + 7:
#ifdef TARGET_X86_64
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_GS].base = ldq_p(mem_buf);
                return 8;
            }
#endif
            env->segs[R_GS].base = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            if (env->hflags & HF_CS64_MASK) {
                env->kernelgsbase = ldq_p(mem_buf);
                return 8;
            }
#endif
            env->kernelgsbase = ldl_p(mem_buf);
            return 4;

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            /* FSW: split the TOP field (bits 11..13) back out into fpstt. */
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */
        case IDX_FP_REGS + 11: /* fiseg */
        case IDX_FP_REGS + 12: /* fioff */
        case IDX_FP_REGS + 13: /* foseg */
        case IDX_FP_REGS + 14: /* fooff */
        case IDX_FP_REGS + 15: /* fop */
            /* Writes are accepted but discarded.  */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR0_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr0(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr0(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR2_REG:
            if (env->hflags & HF_CS64_MASK) {
                env->cr[2] = ldq_p(mem_buf);
                return 8;
            }
            env->cr[2] = ldl_p(mem_buf);
            return 4;

        case IDX_CTL_CR3_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr3(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr3(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR4_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr4(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr4(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR8_REG:
            if (env->hflags & HF_CS64_MASK) {
#ifdef CONFIG_SOFTMMU
                cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf));
#endif
                return 8;
            }
#ifdef CONFIG_SOFTMMU
            cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf));
#endif
            return 4;

        case IDX_CTL_EFER_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_load_efer(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_load_efer(env, ldl_p(mem_buf));
            return 4;
        }
    }
    /* Unrecognised register.  */
    return 0;
}