i386: hvf: Drop regs in HVFX86EmulatorState
[qemu/ar7.git] / target / i386 / hvf / x86.h
blob 6048b5cc7408486f5fc769ee4fbf23e014f583ad

/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Veertu Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HVF_X86_H
#define HVF_X86_H

typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64 bit */
        };
        struct {
            uint32_t erx;               /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bit part */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bit part */
            uint8_t hx;                 /* high 8 bit */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;

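/*
 * Illustrative example (not part of the original header): on a
 * little-endian host the anonymous unions above alias one 64-bit GPR,
 * so every slice of the register can be read or written through the
 * same object:
 *
 *     x86_register r = { .rrx = 0x1122334455667788ULL };
 *     r.hx = 0xab;    // AH-style byte, bits 8..15
 *     // now r.lx == 0x88, r.rx == 0xab88, r.erx == 0x5566ab88
 */
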
typedef enum x86_reg_cr0 {
    CR0_PE = (1L << 0),
    CR0_MP = (1L << 1),
    CR0_EM = (1L << 2),
    CR0_TS = (1L << 3),
    CR0_ET = (1L << 4),
    CR0_NE = (1L << 5),
    CR0_WP = (1L << 16),
    CR0_AM = (1L << 18),
    CR0_NW = (1L << 29),
    CR0_CD = (1L << 30),
    CR0_PG = (1L << 31),
} x86_reg_cr0;

typedef enum x86_reg_cr4 {
    CR4_VME = (1L << 0),
    CR4_PVI = (1L << 1),
    CR4_TSD = (1L << 2),
    CR4_DE = (1L << 3),
    CR4_PSE = (1L << 4),
    CR4_PAE = (1L << 5),
    CR4_MSE = (1L << 6),
    CR4_PGE = (1L << 7),
    CR4_PCE = (1L << 8),
    CR4_OSFXSR = (1L << 9),
    CR4_OSXMMEXCPT = (1L << 10),
    CR4_VMXE = (1L << 13),
    CR4_SMXE = (1L << 14),
    CR4_FSGSBASE = (1L << 16),
    CR4_PCIDE = (1L << 17),
    CR4_OSXSAVE = (1L << 18),
    CR4_SMEP = (1L << 20),
} x86_reg_cr4;

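/*
 * Illustrative only (not in the original header): CPU mode checks are
 * plain bit tests against the CR0/CR4 masks above, e.g. for hypothetical
 * cr0/cr4 values read from the vCPU:
 *
 *     bool protected = (cr0 & CR0_PE) != 0;
 *     bool paging    = (cr0 & CR0_PG) != 0;
 *     bool pae       = (cr4 & CR4_PAE) != 0;
 */
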
/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
    uint16_t link;
    uint16_t sp0;
    uint16_t ss0;
    uint32_t sp1;
    uint16_t ss1;
    uint32_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;
} __attribute__((packed)) x86_tss_segment16;

/* 32 bit Task State Segment */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;
    uint32_t esp0;
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment32;

/* 64 bit Task State Segment */
typedef struct x86_tss_segment64 {
    uint32_t unused;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;
    uint16_t unused3;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment64;

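/*
 * Sanity checks (illustrative addition, not in the original header): with
 * the packed attribute the 32-bit and 64-bit layouts above match the
 * architectural TSS size of 104 bytes.
 */
_Static_assert(sizeof(x86_tss_segment32) == 104, "32-bit TSS must be 104 bytes");
_Static_assert(sizeof(x86_tss_segment64) == 104, "64-bit TSS must be 104 bytes");
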
/* segment descriptors */
typedef struct x86_segment_descriptor {
    uint64_t limit0:16;
    uint64_t base0:16;
    uint64_t base1:8;
    uint64_t type:4;
    uint64_t s:1;
    uint64_t dpl:2;
    uint64_t p:1;
    uint64_t limit1:4;
    uint64_t avl:1;
    uint64_t l:1;
    uint64_t db:1;
    uint64_t g:1;
    uint64_t base2:8;
} __attribute__ ((__packed__)) x86_segment_descriptor;

static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}

static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base2 = base >> 24;
    desc->base1 = (base >> 16) & 0xff;
    desc->base0 = base & 0xffff;
}

static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
    if (desc->g) {
        return (limit << 12) | 0xfff;
    }
    return limit;
}

static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit0 = limit & 0xffff;
    desc->limit1 = limit >> 16;
}

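/*
 * Illustrative usage (not part of the original header): the setters split
 * a flat value across the descriptor's bit-fields and the getters
 * reassemble it, expanding page-granular limits:
 *
 *     x86_segment_descriptor desc = { .g = 1 };
 *     x86_set_segment_base(&desc, 0x00200000);
 *     x86_set_segment_limit(&desc, 0xfffff);
 *     // x86_segment_base(&desc)  == 0x00200000
 *     // x86_segment_limit(&desc) == 0xffffffff ((limit << 12) | 0xfff, since g == 1)
 */
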
typedef struct x86_call_gate {
    uint64_t offset0:16;
    uint64_t selector:16;
    uint64_t param_count:4;
    uint64_t reserved:3;
    uint64_t type:4;
    uint64_t dpl:1;
    uint64_t p:1;
    uint64_t offset1:16;
} __attribute__ ((__packed__)) x86_call_gate;

static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}

#define LDT_SEL 0
#define GDT_SEL 1

typedef struct x68_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:3;
            uint16_t ti:1;
            uint16_t index:12;
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;

typedef struct lazy_flags {
    target_ulong result;
    target_ulong auxbits;
} lazy_flags;

/* Definition of hvf_x86_state is here */
struct HVFX86EmulatorState {
    struct lazy_flags lflags;
    uint8_t mmio_buf[4096];
};

/* useful register access macros */
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])

#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
#define RAX(cpu) RRX(cpu, R_EAX)
#define RCX(cpu) RRX(cpu, R_ECX)
#define RDX(cpu) RRX(cpu, R_EDX)
#define RBX(cpu) RRX(cpu, R_EBX)
#define RSP(cpu) RRX(cpu, R_ESP)
#define RBP(cpu) RRX(cpu, R_EBP)
#define RSI(cpu) RRX(cpu, R_ESI)
#define RDI(cpu) RRX(cpu, R_EDI)
#define R8(cpu) RRX(cpu, R_R8)
#define R9(cpu) RRX(cpu, R_R9)
#define R10(cpu) RRX(cpu, R_R10)
#define R11(cpu) RRX(cpu, R_R11)
#define R12(cpu) RRX(cpu, R_R12)
#define R13(cpu) RRX(cpu, R_R13)
#define R14(cpu) RRX(cpu, R_R14)
#define R15(cpu) RRX(cpu, R_R15)

#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
#define EAX(cpu) ERX(cpu, R_EAX)
#define ECX(cpu) ERX(cpu, R_ECX)
#define EDX(cpu) ERX(cpu, R_EDX)
#define EBX(cpu) ERX(cpu, R_EBX)
#define ESP(cpu) ERX(cpu, R_ESP)
#define EBP(cpu) ERX(cpu, R_EBP)
#define ESI(cpu) ERX(cpu, R_ESI)
#define EDI(cpu) ERX(cpu, R_EDI)

#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
#define AX(cpu) RX(cpu, R_EAX)
#define CX(cpu) RX(cpu, R_ECX)
#define DX(cpu) RX(cpu, R_EDX)
#define BP(cpu) RX(cpu, R_EBP)
#define SP(cpu) RX(cpu, R_ESP)
#define BX(cpu) RX(cpu, R_EBX)
#define SI(cpu) RX(cpu, R_ESI)
#define DI(cpu) RX(cpu, R_EDI)

#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
#define AL(cpu) RL(cpu, R_EAX)
#define CL(cpu) RL(cpu, R_ECX)
#define DL(cpu) RL(cpu, R_EDX)
#define BL(cpu) RL(cpu, R_EBX)

#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
#define AH(cpu) RH(cpu, R_EAX)
#define CH(cpu) RH(cpu, R_ECX)
#define DH(cpu) RH(cpu, R_EDX)
#define BH(cpu) RH(cpu, R_EBX)

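/*
 * Illustrative only (not in the original header): the macros above are
 * all views of the same x86_register slot, so for a hypothetical cpu
 * argument that exposes the regs[] array used by x86_reg():
 *
 *     RAX(cpu) = 0x1122334455667788ULL;
 *     // EAX(cpu) == 0x55667788, AX(cpu) == 0x7788,
 *     // AL(cpu) == 0x88, AH(cpu) == 0x77
 */
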
/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);

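/*
 * Illustrative usage (not part of the original header; variable names are
 * hypothetical): linear_addr() adds the base of the selected segment to a
 * guest offset, linear_addr_size() first truncates the offset to the given
 * operand size, and linear_rip() resolves an instruction pointer against CS:
 *
 *     target_ulong va = linear_addr(cpu, offset, R_DS);
 *     target_ulong fetch_va = linear_rip(cpu, rip);
 */
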
static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* serializing read of tsc */
                         "shl $32,%%rdx; "  /* shift higher 32 bits stored in rdx up */
                         "or %%rdx,%%rax"   /* and or onto rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}

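/*
 * Illustrative usage (not in the original header): timing a block of code
 * with a host-TSC delta.
 *
 *     uint64_t start = rdtscp();
 *     // ... work ...
 *     uint64_t cycles = rdtscp() - start;
 */
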
#endif