i386: hvf: inject General Protection Fault when vmexit through vmcall
[qemu/ar7.git] / target / i386 / hvf-utils / x86.h
blob250364b44816a48bc3774d532cd466acced249ee
1 /*
2 * Copyright (C) 2016 Veertu Inc,
3 * Copyright (C) 2017 Veertu Inc,
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #pragma once
21 #include <sys/types.h>
22 #include <sys/ioctl.h>
23 #include <sys/mman.h>
24 #include <stdarg.h>
25 #include "qemu-common.h"
26 #include "x86_gen.h"
/* exceptions */
typedef enum x86_exception {
    EXCEPTION_DE    = 0,    /* divide error */
    EXCEPTION_DB    = 1,    /* debug fault */
    EXCEPTION_NMI   = 2,    /* non-maskable interrupt */
    EXCEPTION_BP    = 3,    /* breakpoint trap */
    EXCEPTION_OF    = 4,    /* overflow trap */
    EXCEPTION_BR    = 5,    /* boundary range exceeded fault */
    EXCEPTION_UD    = 6,    /* undefined opcode */
    EXCEPTION_NM    = 7,    /* device not available */
    EXCEPTION_DF    = 8,    /* double fault */
    EXCEPTION_RSVD  = 9,    /* not defined */
    EXCEPTION_TS    = 10,   /* invalid TSS fault */
    EXCEPTION_NP    = 11,   /* not present fault */
    EXCEPTION_GP    = 12,   /* general protection fault */
    EXCEPTION_PF    = 13,   /* page fault */
    EXCEPTION_RSVD2 = 14,   /* not defined */
    /* NOTE(review): these values do NOT match the hardware vector numbers
     * (#SS is vector 12 and #GP is vector 13 on real x86); confirm that no
     * consumer uses these constants as raw IDT vectors. */
} x86_exception;
/* general purpose regs — the value of each constant is the register's
 * encoding in the ModRM/REX register field, used to index regs[] below. */
typedef enum x86_reg_name {
    REG_RAX = 0,
    REG_RCX = 1,
    REG_RDX = 2,
    REG_RBX = 3,
    REG_RSP = 4,
    REG_RBP = 5,
    REG_RSI = 6,
    REG_RDI = 7,
    REG_R8  = 8,
    REG_R9  = 9,
    REG_R10 = 10,
    REG_R11 = 11,
    REG_R12 = 12,
    REG_R13 = 13,
    REG_R14 = 14,
    REG_R15 = 15,
} x86_reg_name;
/* segment regs — index order matches the VMCS segment field layout used by
 * the hvf glue code. */
typedef enum x86_reg_segment {
    REG_SEG_ES   = 0,   /* extra segment */
    REG_SEG_CS   = 1,   /* code segment */
    REG_SEG_SS   = 2,   /* stack segment */
    REG_SEG_DS   = 3,   /* data segment */
    REG_SEG_FS   = 4,
    REG_SEG_GS   = 5,
    REG_SEG_LDTR = 6,   /* local descriptor table register */
    REG_SEG_TR   = 7,   /* task register */
} x86_reg_segment;
/*
 * One guest general-purpose register, with anonymous-union overlays for the
 * 64/32/16/8-bit views (rax/eax/ax/al-ah style aliasing, little-endian).
 * Reconstructed: the closing braces of the nested anonymous structs were
 * missing from the source as received.
 */
typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64 bit */
        };
        struct {
            uint32_t erx;               /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bit part */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bit part */
            uint8_t hx;                 /* high 8 bit */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
/* RFLAGS bit masks */
typedef enum x86_rflags {
    RFLAGS_CF   = 0x00000001,   /* bit 0:  carry */
    RFLAGS_PF   = 0x00000004,   /* bit 2:  parity */
    RFLAGS_AF   = 0x00000010,   /* bit 4:  adjust */
    RFLAGS_ZF   = 0x00000040,   /* bit 6:  zero */
    RFLAGS_SF   = 0x00000080,   /* bit 7:  sign */
    RFLAGS_TF   = 0x00000100,   /* bit 8:  trap */
    RFLAGS_IF   = 0x00000200,   /* bit 9:  interrupt enable */
    RFLAGS_DF   = 0x00000400,   /* bit 10: direction */
    RFLAGS_OF   = 0x00000800,   /* bit 11: overflow */
    RFLAGS_IOPL = 0x00003000,   /* bits 12-13: I/O privilege level */
    RFLAGS_NT   = 0x00004000,   /* bit 14: nested task */
    RFLAGS_RF   = 0x00010000,   /* bit 16: resume */
    RFLAGS_VM   = 0x00020000,   /* bit 17: virtual 8086 mode */
    RFLAGS_AC   = 0x00040000,   /* bit 18: alignment check */
    RFLAGS_VIF  = 0x00080000,   /* bit 19: virtual interrupt flag */
    RFLAGS_VIP  = 0x00100000,   /* bit 20: virtual interrupt pending */
    RFLAGS_ID   = 0x00200000,   /* bit 21: CPUID available */
} x86_rflags;
/* rflags register — 64/32-bit views plus per-bit fields overlaid in an
 * anonymous union. Bitfield order follows the hardware layout (cf at bit 0).
 * Note the IF position (bit 9) is named "ief" here. Reconstructed: the
 * closing braces of the nested anonymous structs were missing from the
 * source as received. */
typedef struct x86_reg_flags {
    union {
        struct {
            uint64_t rflags;
        };
        struct {
            uint32_t eflags;
            uint32_t hi32_unused1;
        };
        struct {
            uint32_t cf:1;
            uint32_t unused1:1;
            uint32_t pf:1;
            uint32_t unused2:1;
            uint32_t af:1;
            uint32_t unused3:1;
            uint32_t zf:1;
            uint32_t sf:1;
            uint32_t tf:1;
            uint32_t ief:1;
            uint32_t df:1;
            uint32_t of:1;
            uint32_t iopl:2;
            uint32_t nt:1;
            uint32_t unused4:1;
            uint32_t rf:1;
            uint32_t vm:1;
            uint32_t ac:1;
            uint32_t vif:1;
            uint32_t vip:1;
            uint32_t id:1;
            uint32_t unused5:10;
            uint32_t hi32_unused2;
        };
    };
} __attribute__ ((__packed__)) x86_reg_flags;
/* IA32_EFER MSR bit masks */
typedef enum x86_reg_efer {
    EFER_SCE  = 0x0001,     /* bit 0:  syscall enable */
    EFER_LME  = 0x0100,     /* bit 8:  long mode enable */
    EFER_LMA  = 0x0400,     /* bit 10: long mode active */
    EFER_NXE  = 0x0800,     /* bit 11: no-execute enable */
    EFER_SVME = 0x1000,     /* bit 12: SVM enable */
    EFER_FXSR = 0x4000,     /* bit 14: fast FXSAVE/FXRSTOR */
} x86_reg_efer;
/* Container for the guest's IA32_EFER MSR value. */
typedef struct x86_efer {
    uint64_t efer;
} __attribute__ ((__packed__)) x86_efer;
/* CR0 control register bit masks.
 * Written as hex literals: the original (1L << 31) shifts into the sign bit
 * of a 32-bit long on ILP32 targets, which is undefined/implementation-
 * defined behavior; 0x80000000 (unsigned int) is portable. */
typedef enum x86_reg_cr0 {
    CR0_PE = 0x00000001,    /* protection enable */
    CR0_MP = 0x00000002,    /* monitor coprocessor */
    CR0_EM = 0x00000004,    /* x87 emulation */
    CR0_TS = 0x00000008,    /* task switched */
    CR0_ET = 0x00000010,    /* extension type */
    CR0_NE = 0x00000020,    /* numeric error */
    CR0_WP = 0x00010000,    /* write protect */
    CR0_AM = 0x00040000,    /* alignment mask */
    CR0_NW = 0x20000000,    /* not write-through */
    CR0_CD = 0x40000000,    /* cache disable */
    CR0_PG = 0x80000000,    /* paging enable */
} x86_reg_cr0;
/* CR4 control register bit masks */
typedef enum x86_reg_cr4 {
    CR4_VME        = 0x00000001,    /* bit 0:  v8086 extensions */
    CR4_PVI        = 0x00000002,    /* bit 1:  protected-mode virtual ints */
    CR4_TSD        = 0x00000004,    /* bit 2:  time stamp disable */
    CR4_DE         = 0x00000008,    /* bit 3:  debugging extensions */
    CR4_PSE        = 0x00000010,    /* bit 4:  page size extension */
    CR4_PAE        = 0x00000020,    /* bit 5:  physical address extension */
    CR4_MSE        = 0x00000040,    /* bit 6:  machine check enable */
    CR4_PGE        = 0x00000080,    /* bit 7:  global page enable */
    CR4_PCE        = 0x00000100,    /* bit 8:  perf counter enable */
    CR4_OSFXSR     = 0x00000200,    /* bit 9:  OS FXSAVE/FXRSTOR support */
    CR4_OSXMMEXCPT = 0x00000400,    /* bit 10: OS unmasked SIMD exceptions */
    CR4_VMXE       = 0x00002000,    /* bit 13: VMX enable */
    CR4_SMXE       = 0x00004000,    /* bit 14: SMX enable */
    CR4_FSGSBASE   = 0x00010000,    /* bit 16: {RD,WR}{FS,GS}BASE enable */
    CR4_PCIDE      = 0x00020000,    /* bit 17: PCID enable */
    CR4_OSXSAVE    = 0x00040000,    /* bit 18: XSAVE enable */
    CR4_SMEP       = 0x00100000,    /* bit 20: supervisor exec prevention */
} x86_reg_cr4;
/* 16 bit Task State Segment.
 * Fix: sp1 and sp2 were declared uint32_t, but the hardware 16-bit TSS is a
 * flat array of 22 16-bit words (44 bytes); 32-bit fields skewed every
 * subsequent offset and made the structure 48 bytes. All fields are now
 * uint16_t so the layout matches what a 286-style task switch reads/writes
 * in guest memory. */
typedef struct x86_tss_segment16 {
    uint16_t link;      /* selector of previous task */
    uint16_t sp0;       /* ring-0 stack pointer */
    uint16_t ss0;       /* ring-0 stack segment */
    uint16_t sp1;
    uint16_t ss1;
    uint16_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;
} __attribute__((packed)) x86_tss_segment16;
/* 32 bit Task State Segment — 104-byte hardware layout used by protected
 * mode task switches. */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;      /* selector of previous task (back link) */
    uint32_t esp0;          /* ring-0 stack pointer */
    uint32_t ss0;           /* ring-0 stack segment */
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;           /* page directory base */
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;          /* debug trap flag (bit 0) */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment32;
/* 64 bit Task State Segment — 104-byte hardware layout; holds the privilege
 * stack pointers and interrupt stack table, no general registers. */
typedef struct x86_tss_segment64 {
    uint32_t unused;        /* reserved */
    uint64_t rsp0;          /* ring-0 stack pointer */
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;       /* reserved */
    uint64_t ist1;          /* interrupt stack table entries */
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;       /* reserved */
    uint16_t unused3;       /* reserved */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment64;
/* segment descriptors — 8-byte GDT/LDT entry, bitfields in hardware order
 * (low bits first). Reconstructed: the accessor functions below were missing
 * their braces in the source as received. */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;  /* limit bits 15:0 */
    uint64_t    base0:16;   /* base bits 15:0 */
    uint64_t    base1:8;    /* base bits 23:16 */
    uint64_t    type:4;     /* segment type */
    uint64_t    s:1;        /* 0 = system, 1 = code/data */
    uint64_t    dpl:2;      /* descriptor privilege level */
    uint64_t    p:1;        /* present */
    uint64_t    limit1:4;   /* limit bits 19:16 */
    uint64_t    avl:1;      /* available for software use */
    uint64_t    l:1;        /* 64-bit code segment */
    uint64_t    db:1;       /* default operation size */
    uint64_t    g:1;        /* granularity: limit in 4K pages when set */
    uint64_t    base2:8;    /* base bits 31:24 */
} __attribute__ ((__packed__)) x86_segment_descriptor;

/* Reassemble the 32-bit segment base from its three descriptor fields. */
static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}

/* Scatter a 32-bit segment base into the three descriptor fields. */
static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base2 = base >> 24;
    desc->base1 = (base >> 16) & 0xff;
    desc->base0 = base & 0xffff;
}

/* Effective segment limit in bytes: with G set the 20-bit limit counts
 * 4K pages, so scale it and fill the low 12 bits. */
static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
    if (desc->g) {
        return (limit << 12) | 0xfff;
    }
    return limit;
}

/* Store a raw 20-bit limit into the two descriptor fields (no G scaling). */
static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit0 = limit & 0xffff;
    desc->limit1 = limit >> 16;
}
/* Call-gate descriptor.
 * Fix: the original bitfields summed to only 61 bits (param_count:4, no S
 * bit, dpl:1), so offset1 landed at the wrong bit position and dpl could not
 * hold ring 2/3. The hardware layout of the second dword is: param count
 * (5 bits), reserved (3), type (4), S (1), DPL (2), P (1), then offset
 * bits 31:16 — 64 bits total. Reconstructed the accessor's missing braces
 * as well. */
typedef struct x86_call_gate {
    uint64_t offset0:16;        /* target offset bits 15:0 */
    uint64_t selector:16;       /* target code-segment selector */
    uint64_t param_count:5;     /* words copied between stacks */
    uint64_t reserved:3;
    uint64_t type:4;            /* gate type */
    uint64_t s:1;               /* 0 for system descriptors */
    uint64_t dpl:2;             /* descriptor privilege level */
    uint64_t p:1;               /* present */
    uint64_t offset1:16;        /* target offset bits 31:16 */
} __attribute__ ((__packed__)) x86_call_gate;

/* Reassemble the 32-bit target offset from the gate's two halves. */
static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}
/* NOTE(review): hardware encodes TI=1 as "LDT"; confirm how LDT_SEL/GDT_SEL
 * (apparently a local enumeration) are compared against the ti field. */
#define LDT_SEL     0
#define GDT_SEL     1

/* Segment selector (note the historical "x68" typo is kept — it is the
 * public name used throughout the hvf code).
 * Fix: the bitfields were rpl:3/ti:1/index:12, which puts TI at bit 3; the
 * hardware selector format is RPL in bits 1:0, TI in bit 2 and the table
 * index in bits 15:3. Reconstructed the missing closing braces as well. */
typedef struct x68_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;     /* requested privilege level */
            uint16_t ti:1;      /* table indicator */
            uint16_t index:13;  /* descriptor table index */
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
/* State for lazily-computed guest flags: the emulator stashes the raw
 * arithmetic result and auxiliary carry bits here instead of updating
 * RFLAGS on every instruction.
 * NOTE(review): the exact encoding of result/auxbits is defined by the
 * flag helpers elsewhere in hvf-utils — confirm before relying on it. */
typedef struct lazy_flags {
    addr_t result;
    addr_t auxbits;
} lazy_flags;
364 /* Definition of hvf_x86_state is here */
365 struct HVFX86EmulatorState {
366 int interruptable;
367 uint64_t fetch_rip;
368 uint64_t rip;
369 struct x86_register regs[16];
370 struct x86_reg_flags rflags;
371 struct lazy_flags lflags;
372 struct x86_efer efer;
373 uint8_t mmio_buf[4096];
/* useful register access macros — all expand to lvalues on cpu->hvf_emul.
 * Fix: the cpu (and reg) macro parameters are now parenthesized so an
 * expression argument (e.g. RIP(a ? b : c)) parses correctly. */
#define RIP(cpu)    ((cpu)->hvf_emul->rip)
#define EIP(cpu)    ((uint32_t)(cpu)->hvf_emul->rip)
#define RFLAGS(cpu) ((cpu)->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) ((cpu)->hvf_emul->rflags.eflags)

/* full 64-bit registers */
#define RRX(cpu, reg) ((cpu)->hvf_emul->regs[(reg)].rrx)
#define RAX(cpu)        RRX(cpu, REG_RAX)
#define RCX(cpu)        RRX(cpu, REG_RCX)
#define RDX(cpu)        RRX(cpu, REG_RDX)
#define RBX(cpu)        RRX(cpu, REG_RBX)
#define RSP(cpu)        RRX(cpu, REG_RSP)
#define RBP(cpu)        RRX(cpu, REG_RBP)
#define RSI(cpu)        RRX(cpu, REG_RSI)
#define RDI(cpu)        RRX(cpu, REG_RDI)
#define R8(cpu)         RRX(cpu, REG_R8)
#define R9(cpu)         RRX(cpu, REG_R9)
#define R10(cpu)        RRX(cpu, REG_R10)
#define R11(cpu)        RRX(cpu, REG_R11)
#define R12(cpu)        RRX(cpu, REG_R12)
#define R13(cpu)        RRX(cpu, REG_R13)
#define R14(cpu)        RRX(cpu, REG_R14)
#define R15(cpu)        RRX(cpu, REG_R15)

/* low 32-bit halves */
#define ERX(cpu, reg)   ((cpu)->hvf_emul->regs[(reg)].erx)
#define EAX(cpu)        ERX(cpu, REG_RAX)
#define ECX(cpu)        ERX(cpu, REG_RCX)
#define EDX(cpu)        ERX(cpu, REG_RDX)
#define EBX(cpu)        ERX(cpu, REG_RBX)
#define ESP(cpu)        ERX(cpu, REG_RSP)
#define EBP(cpu)        ERX(cpu, REG_RBP)
#define ESI(cpu)        ERX(cpu, REG_RSI)
#define EDI(cpu)        ERX(cpu, REG_RDI)

/* low 16-bit quarters */
#define RX(cpu, reg)    ((cpu)->hvf_emul->regs[(reg)].rx)
#define AX(cpu)         RX(cpu, REG_RAX)
#define CX(cpu)         RX(cpu, REG_RCX)
#define DX(cpu)         RX(cpu, REG_RDX)
#define BP(cpu)         RX(cpu, REG_RBP)
#define SP(cpu)         RX(cpu, REG_RSP)
#define BX(cpu)         RX(cpu, REG_RBX)
#define SI(cpu)         RX(cpu, REG_RSI)
#define DI(cpu)         RX(cpu, REG_RDI)

/* low 8 bits */
#define RL(cpu, reg)    ((cpu)->hvf_emul->regs[(reg)].lx)
#define AL(cpu)         RL(cpu, REG_RAX)
#define CL(cpu)         RL(cpu, REG_RCX)
#define DL(cpu)         RL(cpu, REG_RDX)
#define BL(cpu)         RL(cpu, REG_RBX)

/* bits 15:8 */
#define RH(cpu, reg)    ((cpu)->hvf_emul->regs[(reg)].hx)
#define AH(cpu)         RH(cpu, REG_RAX)
#define CH(cpu)         RH(cpu, REG_RCX)
#define DH(cpu)         RH(cpu, REG_RDX)
#define BH(cpu)         RH(cpu, REG_RBX)
/* deal with GDT/LDT descriptors in memory */

/* Fetch into @desc the descriptor named by @sel from the guest's GDT or LDT
 * (per sel.ti). Presumably returns false on failure — confirm in x86.c. */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
/* Write @desc back to the guest descriptor table slot named by @sel. */
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

/* Read IDT entry @gate into @idt_desc as a call/interrupt gate. */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers */
/* CPU mode predicates, derived from the guest's CR0/EFER/RFLAGS state. */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

/* Translate a segment-relative address to a guest linear address.
 * NOTE(review): semantics of the @size variant (access-size checking?) are
 * defined in x86.c — confirm before use. */
addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg);
addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,
                        x86_reg_segment seg);
addr_t linear_rip(struct CPUState *cpu, addr_t rip);
/* Read the host time-stamp counter with RDTSCP (x86-64 only; hvf runs on
 * x86-64 macOS hosts). Reconstructed: the function's braces and the empty
 * input-operand line of the asm statement were missing from the source as
 * received. */
static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* serializing read of tsc */
                         "shl $32,%%rdx; "  /* shift higher 32 bits stored in rdx up */
                         "or %%rdx,%%rax"   /* and or onto rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :                  /* no inputs */
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}