/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Veertu Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/* general purpose register, addressable at 64/32/16/8 bit widths */
typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64 bit */
        };
        struct {
            uint32_t erx;               /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bit part */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bit part */
            uint8_t hx;                 /* high 8 bit */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
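/*
 * Illustrative sketch (not from the original header): the anonymous union
 * above overlays all access widths on the same storage, mirroring how
 * AL/AH/AX/EAX alias RAX on little-endian x86:
 *
 *     x86_register r;
 *     r.rrx = 0x1122334455667788ULL;
 *     // r.erx == 0x55667788  (low 32 bits)
 *     // r.rx  == 0x7788      (low 16 bits)
 *     // r.lx  == 0x88        (bits 0..7)
 *     // r.hx  == 0x77        (bits 8..15)
 */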
typedef enum x86_rflags {
    RFLAGS_CF       = (1L << 0),
    RFLAGS_PF       = (1L << 2),
    RFLAGS_AF       = (1L << 4),
    RFLAGS_ZF       = (1L << 6),
    RFLAGS_SF       = (1L << 7),
    RFLAGS_TF       = (1L << 8),
    RFLAGS_IF       = (1L << 9),
    RFLAGS_DF       = (1L << 10),
    RFLAGS_OF       = (1L << 11),
    RFLAGS_IOPL     = (3L << 12),
    RFLAGS_NT       = (1L << 14),
    RFLAGS_RF       = (1L << 16),
    RFLAGS_VM       = (1L << 17),
    RFLAGS_AC       = (1L << 18),
    RFLAGS_VIF      = (1L << 19),
    RFLAGS_VIP      = (1L << 20),
    RFLAGS_ID       = (1L << 21),
} x86_rflags;
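/*
 * Illustrative sketch (not from the original header): these enumerators are
 * plain bit masks, so they compose with the usual bitwise operators:
 *
 *     uint64_t rflags = RFLAGS_IF | RFLAGS_ZF;
 *     if (rflags & RFLAGS_IF) {
 *         // interrupts enabled
 *     }
 *     rflags &= ~RFLAGS_TF;                     // clear the trap flag
 *     int iopl = (rflags & RFLAGS_IOPL) >> 12;  // two-bit I/O privilege level
 */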
/* rflags register */
typedef struct x86_reg_flags {
    union {
        struct {
            uint64_t rflags;            /* full 64 bit */
        };
        struct {
            uint32_t eflags;            /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint32_t cf:1;
            uint32_t unused1:1;
            uint32_t pf:1;
            uint32_t unused2:1;
            uint32_t af:1;
            uint32_t unused3:1;
            uint32_t zf:1;
            uint32_t sf:1;
            uint32_t tf:1;
            uint32_t ief:1;
            uint32_t df:1;
            uint32_t of:1;
            uint32_t iopl:2;
            uint32_t nt:1;
            uint32_t unused4:1;
            uint32_t rf:1;
            uint32_t vm:1;
            uint32_t ac:1;
            uint32_t vif:1;
            uint32_t vip:1;
            uint32_t id:1;
            uint32_t unused5:10;
            uint32_t hi32_unused2;
        };
    };
} __attribute__ ((__packed__)) x86_reg_flags;
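/*
 * Illustrative sketch (not from the original header): as with x86_register,
 * the union gives both raw and decoded views of the same flags state:
 *
 *     x86_reg_flags f = { .rflags = RFLAGS_IF | RFLAGS_CF };
 *     // f.eflags == (uint32_t)f.rflags  (raw 32 bit view)
 *     // f.cf == 1, f.ief == 1           (decoded bit views)
 */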
typedef enum x86_reg_cr0 {
    CR0_PE =            (1L << 0),
    CR0_MP =            (1L << 1),
    CR0_EM =            (1L << 2),
    CR0_TS =            (1L << 3),
    CR0_ET =            (1L << 4),
    CR0_NE =            (1L << 5),
    CR0_WP =            (1L << 16),
    CR0_AM =            (1L << 18),
    CR0_NW =            (1L << 29),
    CR0_CD =            (1L << 30),
    CR0_PG =            (1L << 31),
} x86_reg_cr0;
typedef enum x86_reg_cr4 {
    CR4_VME =           (1L << 0),
    CR4_PVI =           (1L << 1),
    CR4_TSD =           (1L << 2),
    CR4_DE =            (1L << 3),
    CR4_PSE =           (1L << 4),
    CR4_PAE =           (1L << 5),
    CR4_MCE =           (1L << 6),
    CR4_PGE =           (1L << 7),
    CR4_PCE =           (1L << 8),
    CR4_OSFXSR =        (1L << 9),
    CR4_OSXMMEXCPT =    (1L << 10),
    CR4_VMXE =          (1L << 13),
    CR4_SMXE =          (1L << 14),
    CR4_FSGSBASE =      (1L << 16),
    CR4_PCIDE =         (1L << 17),
    CR4_OSXSAVE =       (1L << 18),
    CR4_SMEP =          (1L << 20),
} x86_reg_cr4;
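/*
 * Illustrative sketch (not from the original header): control register bits
 * gate CPU modes, e.g. paging is on iff CR0.PG is set and PAE page tables
 * are selected by CR4.PAE; this is what predicates like
 * x86_is_paging_mode()/x86_is_pae_enabled() declared below test:
 *
 *     bool paging_on = (cr0 & CR0_PG) != 0;
 *     bool pae_on    = (cr4 & CR4_PAE) != 0;
 */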
/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
    uint16_t link;
    uint16_t sp0;
    uint16_t ss0;
    uint16_t sp1;
    uint16_t ss1;
    uint16_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldt;
} __attribute__((packed)) x86_tss_segment16;
/* 32 bit Task State Segment */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;
    uint32_t esp0;
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment32;
/* 64 bit Task State Segment */
typedef struct x86_tss_segment64 {
    uint32_t unused;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;
    uint16_t unused3;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment64;
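/*
 * Illustrative sketch (not from the original header): the packed layouts
 * above are meant to match the architectural TSS sizes exactly, which can
 * be checked at compile time:
 *
 *     _Static_assert(sizeof(x86_tss_segment16) == 44,  "16 bit TSS");
 *     _Static_assert(sizeof(x86_tss_segment32) == 104, "32 bit TSS");
 *     _Static_assert(sizeof(x86_tss_segment64) == 104, "64 bit TSS");
 */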
/* segment descriptors */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;
    uint64_t    base0:16;
    uint64_t    base1:8;
    uint64_t    type:4;
    uint64_t    s:1;
    uint64_t    dpl:2;
    uint64_t    p:1;
    uint64_t    limit1:4;
    uint64_t    avl:1;
    uint64_t    l:1;
    uint64_t    db:1;
    uint64_t    g:1;
    uint64_t    base2:8;
} __attribute__ ((__packed__)) x86_segment_descriptor;
static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}
static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base2 = base >> 24;
    desc->base1 = (base >> 16) & 0xff;
    desc->base0 = base & 0xffff;
}
static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
    if (desc->g) {
        /* granularity bit set: the 20 bit limit is in units of 4K pages */
        return (limit << 12) | 0xfff;
    }
    return limit;
}
static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit0 = limit & 0xffff;
    desc->limit1 = limit >> 16;
}
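/*
 * Illustrative sketch (not from the original header): the accessors above
 * split a 32 bit base across the descriptor's three base fields and
 * reassemble it, so a set followed by a get round-trips:
 *
 *     x86_segment_descriptor d = {0};
 *     x86_set_segment_base(&d, 0x00abcdef);
 *     // x86_segment_base(&d) == 0x00abcdef
 *     x86_set_segment_limit(&d, 0xfffff);
 *     d.g = 1;
 *     // x86_segment_limit(&d) == 0xffffffff (20 bit limit scaled to 4K pages)
 */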
typedef struct x86_call_gate {
    uint64_t offset0:16;
    uint64_t selector:16;
    uint64_t param_count:4;
    uint64_t reserved:3;
    uint64_t type:4;
    uint64_t dpl:1;
    uint64_t p:1;
    uint64_t offset1:16;
} __attribute__ ((__packed__)) x86_call_gate;
static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}
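/*
 * Illustrative sketch (not from the original header): like segment bases,
 * a gate's target offset is split across two fields and reassembled:
 *
 *     x86_call_gate g = { .offset0 = 0x5678, .offset1 = 0x1234 };
 *     // x86_call_gate_offset(&g) == 0x12345678
 */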
typedef struct x68_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;
            uint16_t ti:1;
            uint16_t index:13;
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
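/*
 * Illustrative sketch (not from the original header): a selector packs the
 * requested privilege level, table indicator (GDT/LDT) and descriptor index
 * into 16 bits, so the bitfield view decodes a raw selector directly:
 *
 *     x68_segment_selector s = { .sel = 0x002b };
 *     // s.rpl   == 3   (bits 0..1)
 *     // s.ti    == 0   (bit 2: 0 = GDT, 1 = LDT)
 *     // s.index == 5   (bits 3..15)
 */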
typedef struct lazy_flags {
    target_ulong result;
    target_ulong auxbits;
} lazy_flags;
/* Definition of hvf_x86_state is here */
struct HVFX86EmulatorState {
    int interruptable;
    uint64_t fetch_rip;
    uint64_t rip;
    struct x86_register regs[16];
    struct x86_reg_flags rflags;
    struct lazy_flags lflags;
    uint8_t mmio_buf[4096];
};
/* useful register access macros */
#define RIP(cpu)    (cpu->hvf_emul->rip)
#define EIP(cpu)    ((uint32_t)cpu->hvf_emul->rip)
#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)

#define RRX(cpu, reg)   (cpu->hvf_emul->regs[reg].rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

#define ERX(cpu, reg)   (cpu->hvf_emul->regs[reg].erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

#define RX(cpu, reg)    (cpu->hvf_emul->regs[reg].rx)
#define AX(cpu)         RX(cpu, R_EAX)
#define CX(cpu)         RX(cpu, R_ECX)
#define DX(cpu)         RX(cpu, R_EDX)
#define BP(cpu)         RX(cpu, R_EBP)
#define SP(cpu)         RX(cpu, R_ESP)
#define BX(cpu)         RX(cpu, R_EBX)
#define SI(cpu)         RX(cpu, R_ESI)
#define DI(cpu)         RX(cpu, R_EDI)

#define RL(cpu, reg)    (cpu->hvf_emul->regs[reg].lx)
#define AL(cpu)         RL(cpu, R_EAX)
#define CL(cpu)         RL(cpu, R_ECX)
#define DL(cpu)         RL(cpu, R_EDX)
#define BL(cpu)         RL(cpu, R_EBX)

#define RH(cpu, reg)    (cpu->hvf_emul->regs[reg].hx)
#define AH(cpu)         RH(cpu, R_EAX)
#define CH(cpu)         RH(cpu, R_ECX)
#define DH(cpu)         RH(cpu, R_EDX)
#define BH(cpu)         RH(cpu, R_EBX)
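/*
 * Illustrative sketch (not from the original header): because each entry of
 * regs[] is an x86_register union, the width-specific macros all alias the
 * same state, e.g. for some CPUState *cpu:
 *
 *     RAX(cpu) = 0x1122334455667788ULL;
 *     // EAX(cpu) == 0x55667788
 *     // AX(cpu)  == 0x7788
 *     // AH(cpu)  == 0x77, AL(cpu) == 0x88
 */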
/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
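/*
 * Illustrative sketch (not from the original header): outside long mode, a
 * linear address is the segment base plus the effective address, which is
 * conceptually what the helpers above compute:
 *
 *     linear = x86_segment_base(desc_for(seg)) + addr;
 *
 * (desc_for() is hypothetical; the real helpers work from the guest's
 * cached segment register state.)
 */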
static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* serializing read of tsc */
                         "shl $32,%%rdx; "  /* shift higher 32 bits stored in rdx up */
                         "or %%rdx,%%rax"   /* and or onto rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :                  /* no inputs */
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}
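/*
 * Illustrative sketch (not from the original header): rdtscp() returns the
 * full 64 bit time stamp counter, handy for cheap interval timing:
 *
 *     uint64_t start = rdtscp();
 *     do_work();                          // hypothetical workload
 *     uint64_t cycles = rdtscp() - start;
 */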