target/i386/hax-all.c

/*
 * QEMU HAX support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * Copyright (c) 2011 Intel Corporation
 *  Written by:
 *  Jiang Yunhong<yunhong.jiang@intel.com>
 *  Xin Xiaohui<xiaohui.xin@intel.com>
 *  Zhang Xiantao<xiantao.zhang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * HAX common code for both Windows and Darwin
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"

#include "qemu-common.h"
#include "hax-i386.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"

#define DEBUG_HAX 0

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

/* Current version */
const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */
/* Minimum HAX kernel version */
const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */

static bool hax_allowed;

struct hax_state hax_global;

static void hax_vcpu_sync_state(CPUArchState *env, int modified);
static int hax_arch_get_registers(CPUArchState *env);

int hax_enabled(void)
{
    return hax_allowed;
}

int valid_hax_tunnel_size(uint16_t size)
{
    return size >= sizeof(struct hax_tunnel);
}

hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
    struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;
    if (!vcpu) {
        return HAX_INVALID_FD;
    }
    return vcpu->fd;
}

static int hax_get_capability(struct hax_state *hax)
{
    int ret;
    struct hax_capabilityinfo capinfo, *cap = &capinfo;

    ret = hax_capability(hax, cap);
    if (ret) {
        return ret;
    }

    if ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == HAX_CAP_STATUS_NOTWORKING) {
        if (cap->winfo & HAX_CAP_FAILREASON_VT) {
            DPRINTF
                ("VTX feature is not enabled, HAX driver will not work.\n");
        } else if (cap->winfo & HAX_CAP_FAILREASON_NX) {
            DPRINTF
                ("NX feature is not enabled, HAX driver will not work.\n");
        }
        return -ENXIO;
    }

    if (!(cap->winfo & HAX_CAP_UG)) {
        fprintf(stderr, "UG mode is not supported by the hardware.\n");
        return -ENOTSUP;
    }

    hax->supports_64bit_ramblock = !!(cap->winfo & HAX_CAP_64BIT_RAMBLOCK);

    if (cap->wstatus & HAX_CAP_MEMQUOTA) {
        if (cap->mem_quota < hax->mem_quota) {
            fprintf(stderr, "The VM memory needed exceeds the driver limit.\n");
            return -ENOSPC;
        }
    }
    return 0;
}

static int hax_version_support(struct hax_state *hax)
{
    int ret;
    struct hax_module_version version;

    ret = hax_mod_version(hax, &version);
    if (ret < 0) {
        return 0;
    }

    if (hax_min_version > version.cur_version) {
        fprintf(stderr, "Incompatible HAX module version %d,",
                version.cur_version);
        fprintf(stderr, "requires minimum version %d\n", hax_min_version);
        return 0;
    }
    if (hax_cur_version < version.compat_version) {
        fprintf(stderr, "Incompatible QEMU HAX API version %x,",
                hax_cur_version);
        fprintf(stderr, "requires minimum HAX API version %x\n",
                version.compat_version);
        return 0;
    }

    return 1;
}
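
/*
 * Create the HAX-side vCPU for @id: ask the kernel module to create it,
 * open its file descriptor and set up the I/O tunnel that QEMU uses to
 * exchange exit information with it.
 */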
int hax_vcpu_create(int id)
{
    struct hax_vcpu_state *vcpu = NULL;
    int ret;

    if (!hax_global.vm) {
        fprintf(stderr, "vcpu %x creation failed, vm is null\n", id);
        return -1;
    }

    if (hax_global.vm->vcpus[id]) {
        fprintf(stderr, "vcpu %x allocated already\n", id);
        return 0;
    }

    vcpu = g_malloc(sizeof(struct hax_vcpu_state));
    if (!vcpu) {
        fprintf(stderr, "Failed to alloc vcpu state\n");
        return -ENOMEM;
    }

    memset(vcpu, 0, sizeof(struct hax_vcpu_state));

    ret = hax_host_create_vcpu(hax_global.vm->fd, id);
    if (ret) {
        fprintf(stderr, "Failed to create vcpu %x\n", id);
        goto error;
    }

    vcpu->vcpu_id = id;
    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
    if (hax_invalid_fd(vcpu->fd)) {
        fprintf(stderr, "Failed to open the vcpu\n");
        ret = -ENODEV;
        goto error;
    }

    hax_global.vm->vcpus[id] = vcpu;

    ret = hax_host_setup_vcpu_channel(vcpu);
    if (ret) {
        fprintf(stderr, "Invalid hax tunnel size\n");
        ret = -EINVAL;
        goto error;
    }
    return 0;

error:
    /* vcpu and tunnel will be closed automatically */
    if (vcpu && !hax_invalid_fd(vcpu->fd)) {
        hax_close_fd(vcpu->fd);
    }

    hax_global.vm->vcpus[id] = NULL;
    g_free(vcpu);
    return -1;
}

int hax_vcpu_destroy(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    if (!vcpu) {
        return 0;
    }

    if (!hax_global.vm) {
        fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
        return -1;
    }

    /*
     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
     * 2. Closing the fd causes the hax kernel module to clean up the vcpu
     */
    hax_close_fd(vcpu->fd);
    hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
    g_free(vcpu);
    return 0;
}
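
/*
 * Per-CPU init path: create the HAX vCPU, bind it to the CPUState and
 * register the reset handler.  Failure here is fatal.
 */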
int hax_init_vcpu(CPUState *cpu)
{
    int ret;

    ret = hax_vcpu_create(cpu->cpu_index);
    if (ret < 0) {
        fprintf(stderr, "Failed to create HAX vcpu\n");
        exit(-1);
    }

    cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
    cpu->vcpu_dirty = true;
    qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));

    return ret;
}
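
/*
 * Create the HAX VM (or return the already existing one) and open a file
 * descriptor to it.
 */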
struct hax_vm *hax_vm_create(struct hax_state *hax)
{
    struct hax_vm *vm;
    int vm_id = 0, ret;

    if (hax_invalid_fd(hax->fd)) {
        return NULL;
    }

    if (hax->vm) {
        return hax->vm;
    }

    vm = g_malloc(sizeof(struct hax_vm));
    if (!vm) {
        return NULL;
    }
    memset(vm, 0, sizeof(struct hax_vm));
    ret = hax_host_create_vm(hax, &vm_id);
    if (ret) {
        fprintf(stderr, "Failed to create vm %x\n", ret);
        goto error;
    }
    vm->id = vm_id;
    vm->fd = hax_host_open_vm(hax, vm_id);
    if (hax_invalid_fd(vm->fd)) {
        fprintf(stderr, "Failed to open vm %d\n", vm_id);
        goto error;
    }

    hax->vm = vm;
    return vm;

error:
    g_free(vm);
    hax->vm = NULL;
    return NULL;
}

int hax_vm_destroy(struct hax_vm *vm)
{
    int i;

    for (i = 0; i < HAX_MAX_VCPU; i++) {
        if (vm->vcpus[i]) {
            fprintf(stderr, "VCPU should be cleaned before vm clean\n");
            return -1;
        }
    }
    hax_close_fd(vm->fd);
    g_free(vm);
    hax_global.vm = NULL;
    return 0;
}

static void hax_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
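
/*
 * One-time accelerator setup: open the HAX module, check its capabilities
 * and version, create the VM, initialize the memory mapping and report the
 * QEMU API version to the kernel module.
 */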
static int hax_init(ram_addr_t ram_size)
{
    struct hax_state *hax = NULL;
    struct hax_qemu_version qversion;
    int ret;

    hax = &hax_global;

    memset(hax, 0, sizeof(struct hax_state));
    hax->mem_quota = ram_size;

    hax->fd = hax_mod_open();
    if (hax_invalid_fd(hax->fd)) {
        hax->fd = 0;
        ret = -ENODEV;
        goto error;
    }

    ret = hax_get_capability(hax);

    if (ret) {
        if (ret != -ENOSPC) {
            ret = -EINVAL;
        }
        goto error;
    }

    if (!hax_version_support(hax)) {
        ret = -EINVAL;
        goto error;
    }

    hax->vm = hax_vm_create(hax);
    if (!hax->vm) {
        fprintf(stderr, "Failed to create HAX VM\n");
        ret = -EINVAL;
        goto error;
    }

    hax_memory_init();

    qversion.cur_version = hax_cur_version;
    qversion.min_version = hax_min_version;
    hax_notify_qemu_version(hax->vm->fd, &qversion);
    cpu_interrupt_handler = hax_handle_interrupt;

    return ret;
error:
    if (hax->vm) {
        hax_vm_destroy(hax->vm);
    }
    if (hax->fd) {
        hax_mod_close(hax);
    }

    return ret;
}

static int hax_accel_init(MachineState *ms)
{
    int ret = hax_init(ms->ram_size);

    if (ret && (ret != -ENOSPC)) {
        fprintf(stderr, "No accelerator found.\n");
    } else {
        fprintf(stdout, "HAX is %s and emulator runs in %s mode.\n",
                !ret ? "working" : "not working",
                !ret ? "fast virt" : "emulation");
    }
    return ret;
}
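
/*
 * Handle a fast MMIO exit: direction 0/1 is a single MMIO read/write at
 * hft->gpa with the data carried in hft->value; direction 2 is a
 * memory-to-memory move from hft->gpa to hft->gpa2 (see below).
 */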
static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft)
{
    if (hft->direction < 2) {
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &hft->value, hft->size,
                               hft->direction);
    } else {
        /*
         * HAX API v4 supports transferring data between two MMIO addresses,
         * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
         *  hft->direction == 2: gpa ==> gpa2
         */
        uint64_t value;
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &value, hft->size, 0);
        cpu_physical_memory_rw(hft->gpa2, (uint8_t *) &value, hft->size, 1);
    }

    return 0;
}
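
/*
 * Handle a port I/O exit: perform @count accesses of @size bytes at @port,
 * walking @buffer forwards or backwards according to the direction flag
 * @df, matching string I/O (INS/OUTS) semantics.
 */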
static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
                         int direction, int size, int count, void *buffer)
{
    uint8_t *ptr;
    int i;
    MemTxAttrs attrs = { 0 };

    if (!df) {
        ptr = (uint8_t *) buffer;
    } else {
        ptr = buffer + size * count - size;
    }
    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size, direction == HAX_EXIT_IO_OUT);
        if (!df) {
            ptr += size;
        } else {
            ptr -= size;
        }
    }

    return 0;
}

static int hax_vcpu_interrupt(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    /*
     * Try to inject an interrupt if the guest can accept it.
     * Unlike KVM, the HAX kernel module checks eflags itself,
     * so QEMU does not have to.
     */
    if (ht->ready_for_interrupt_injection &&
        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        int irq;

        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            hax_inject_interrupt(env, irq);
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        ht->request_interrupt_window = 1;
    } else {
        ht->request_interrupt_window = 0;
    }
    return 0;
}

void hax_raise_event(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    if (!vcpu) {
        return;
    }
    vcpu->tunnel->user_event_pending = 1;
}

/*
 * Ask the hax kernel module to run the CPU for us until:
 * 1. The guest crashes or shuts down
 * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO instruction
 * 3. The guest executes HLT
 * 4. QEMU has a signal/event pending
 * 5. An unknown VMX exit happens
 */
static int hax_vcpu_hax_exec(CPUArchState *env)
{
    int ret = 0;
    CPUState *cpu = ENV_GET_CPU(env);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    if (!hax_enabled()) {
        DPRINTF("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip);
        return 0;
    }

    cpu->halted = 0;

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
        DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
                cpu->cpu_index);
        do_cpu_init(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
                cpu->cpu_index);
        hax_vcpu_sync_state(env, 0);
        do_cpu_sipi(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    do {
        int hax_ret;

        if (cpu->exit_request) {
            ret = 1;
            break;
        }

        hax_vcpu_interrupt(env);

        qemu_mutex_unlock_iothread();
        cpu_exec_start(cpu);
        hax_ret = hax_vcpu_run(vcpu);
        cpu_exec_end(cpu);
        qemu_mutex_lock_iothread();

        /* Simply continue the vcpu_run if system call interrupted */
        if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
            DPRINTF("io window interrupted\n");
            continue;
        }

        if (hax_ret < 0) {
            fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
            abort();
        }
        switch (ht->_exit_status) {
        case HAX_EXIT_IO:
            ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
                                ht->pio._direction,
                                ht->pio._size, ht->pio._count, vcpu->iobuf);
            break;
        case HAX_EXIT_FAST_MMIO:
            ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf);
            break;
        /* Guest state changed, currently only for shutdown */
        case HAX_EXIT_STATECHANGE:
            fprintf(stdout, "VCPU shutdown request\n");
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
            hax_vcpu_sync_state(env, 0);
            ret = 1;
            break;
        case HAX_EXIT_UNKNOWN_VMEXIT:
            fprintf(stderr, "Unknown VMX exit %x from guest\n",
                    ht->_exit_reason);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = -1;
            break;
        case HAX_EXIT_HLT:
            if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
                /* hlt instruction with interrupt disabled is shutdown */
                env->eflags |= IF_MASK;
                cpu->halted = 1;
                cpu->exception_index = EXCP_HLT;
                ret = 1;
            }
            break;
        /* these situations will continue to hax module */
        case HAX_EXIT_INTERRUPT:
        case HAX_EXIT_PAUSED:
            break;
        case HAX_EXIT_MMIO:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unsupported MMIO emulation\n");
            ret = -1;
            break;
        case HAX_EXIT_REAL:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unimplemented real mode emulation\n");
            ret = -1;
            break;
        default:
            fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = 1;
            break;
        }
    } while (!ret);

    if (cpu->exit_request) {
        cpu->exit_request = 0;
        cpu->exception_index = EXCP_INTERRUPT;
    }
    return ret < 0;
}

static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_arch_get_registers(env);
    cpu->vcpu_dirty = true;
}

void hax_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->vcpu_dirty = false;
}

void hax_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->vcpu_dirty = false;
}

void hax_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

int hax_smp_cpu_exec(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
    int fatal;
    int ret;

    while (1) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = hax_vcpu_hax_exec(env);

        if (fatal) {
            fprintf(stderr, "Unsupported HAX vcpu return\n");
            abort();
        }
    }

    return ret;
}

static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->operand_size = 0;
    lhs->desc = 1;
    lhs->long_mode = 0;
    lhs->granularity = 0;
    lhs->available = 0;
}

static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->operand_size << DESC_B_SHIFT)
        | (rhs->desc * DESC_S_MASK)
        | (rhs->long_mode << DESC_L_SHIFT)
        | (rhs->granularity * DESC_G_MASK) | (rhs->available * DESC_AVL_MASK);
}

static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
    lhs->desc = (flags & DESC_S_MASK) != 0;
    lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
    lhs->granularity = (flags & DESC_G_MASK) != 0;
    lhs->available = (flags & DESC_AVL_MASK) != 0;
}
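
/* Copy a single register between HAX and QEMU: set != 0 copies the QEMU
 * value into the HAX register image, set == 0 copies it back out. */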
static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
{
    target_ulong reg = *hax_reg;

    if (set) {
        *hax_reg = *qemu_reg;
    } else {
        *qemu_reg = reg;
    }
}

/* The sregs have been synced with the HAX kernel already before this call */
static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    get_seg(&env->segs[R_CS], &sregs->_cs);
    get_seg(&env->segs[R_DS], &sregs->_ds);
    get_seg(&env->segs[R_ES], &sregs->_es);
    get_seg(&env->segs[R_FS], &sregs->_fs);
    get_seg(&env->segs[R_GS], &sregs->_gs);
    get_seg(&env->segs[R_SS], &sregs->_ss);

    get_seg(&env->tr, &sregs->_tr);
    get_seg(&env->ldt, &sregs->_ldt);
    env->idt.limit = sregs->_idt.limit;
    env->idt.base = sregs->_idt.base;
    env->gdt.limit = sregs->_gdt.limit;
    env->gdt.base = sregs->_gdt.base;
    return 0;
}

static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
        set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs->_cs, &env->segs[R_CS]);
        set_seg(&sregs->_ds, &env->segs[R_DS]);
        set_seg(&sregs->_es, &env->segs[R_ES]);
        set_seg(&sregs->_fs, &env->segs[R_FS]);
        set_seg(&sregs->_gs, &env->segs[R_GS]);
        set_seg(&sregs->_ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs->_ss.selector = (sregs->_ss.selector & ~3) |
                                  (sregs->_cs.selector & 3);
            sregs->_ss.dpl = sregs->_ss.selector & 3;
        }
    }

    set_seg(&sregs->_tr, &env->tr);
    set_seg(&sregs->_ldt, &env->ldt);
    sregs->_idt.limit = env->idt.limit;
    sregs->_idt.base = env->idt.base;
    sregs->_gdt.limit = env->gdt.limit;
    sregs->_gdt.base = env->gdt.base;
    return 0;
}
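
/*
 * Copy the general-purpose registers, rflags/rip, control registers and
 * segment state between QEMU's CPUArchState and the HAX vcpu_state_t.
 * set != 0 pushes QEMU's state into the kernel module, set == 0 pulls it out.
 */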
static int hax_sync_vcpu_register(CPUArchState *env, int set)
{
    struct vcpu_state_t regs;
    int ret;
    memset(&regs, 0, sizeof(struct vcpu_state_t));

    if (!set) {
        ret = hax_sync_vcpu_state(env, &regs, 0);
        if (ret < 0) {
            return -1;
        }
    }

    /* generic register */
    hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
    hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
    hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
    hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
    hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
    hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
    hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
    hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    hax_getput_reg(&regs._r8, &env->regs[8], set);
    hax_getput_reg(&regs._r9, &env->regs[9], set);
    hax_getput_reg(&regs._r10, &env->regs[10], set);
    hax_getput_reg(&regs._r11, &env->regs[11], set);
    hax_getput_reg(&regs._r12, &env->regs[12], set);
    hax_getput_reg(&regs._r13, &env->regs[13], set);
    hax_getput_reg(&regs._r14, &env->regs[14], set);
    hax_getput_reg(&regs._r15, &env->regs[15], set);
#endif
    hax_getput_reg(&regs._rflags, &env->eflags, set);
    hax_getput_reg(&regs._rip, &env->eip, set);

    if (set) {
        regs._cr0 = env->cr[0];
        regs._cr2 = env->cr[2];
        regs._cr3 = env->cr[3];
        regs._cr4 = env->cr[4];
        hax_set_segments(env, &regs);
    } else {
        env->cr[0] = regs._cr0;
        env->cr[2] = regs._cr2;
        env->cr[3] = regs._cr3;
        env->cr[4] = regs._cr4;
        hax_get_segments(env, &regs);
    }

    if (set) {
        ret = hax_sync_vcpu_state(env, &regs, 1);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

static void hax_msr_entry_set(struct vmx_msr *item, uint32_t index,
                              uint64_t value)
{
    item->entry = index;
    item->value = value;
}

static int hax_get_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs = md.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].entry = MSR_IA32_SYSENTER_CS;
    msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
    msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
    msrs[n++].entry = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    msrs[n++].entry = MSR_EFER;
    msrs[n++].entry = MSR_STAR;
    msrs[n++].entry = MSR_LSTAR;
    msrs[n++].entry = MSR_CSTAR;
    msrs[n++].entry = MSR_FMASK;
    msrs[n++].entry = MSR_KERNELGSBASE;
#endif
    md.nr_msr = n;
    ret = hax_sync_msr(env, &md, 0);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < md.done; i++) {
        switch (msrs[i].entry) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].value;
            break;
        case MSR_IA32_TSC:
            env->tsc = msrs[i].value;
            break;
#ifdef TARGET_X86_64
        case MSR_EFER:
            env->efer = msrs[i].value;
            break;
        case MSR_STAR:
            env->star = msrs[i].value;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].value;
            break;
        case MSR_CSTAR:
            env->cstar = msrs[i].value;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].value;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].value;
            break;
#endif
        }
    }

    return 0;
}

static int hax_set_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs = md.entries;
    int n = 0;

    memset(&md, 0, sizeof(struct hax_msr_data));
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    hax_msr_entry_set(&msrs[n++], MSR_EFER, env->efer);
    hax_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    hax_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    hax_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    hax_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    hax_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
#endif
    md.nr_msr = n;
    md.done = 0;

    return hax_sync_msr(env, &md, 1);
}
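
/* Copy the FPU and SSE state from the HAX fx_layout image into the QEMU
 * CPU state. */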
static int hax_get_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i, ret;

    ret = hax_sync_fpu(env, &fpu, 0);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftw >> i) & 1);
    }
    memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));

    for (i = 0; i < 8; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.mmx_1[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.mmx_1[i][8]);
        if (CPU_NB_REGS > 8) {
            env->xmm_regs[i + 8].ZMM_Q(0) = ldq_p(&fpu.mmx_2[i][0]);
            env->xmm_regs[i + 8].ZMM_Q(1) = ldq_p(&fpu.mmx_2[i][8]);
        }
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int hax_set_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i;

    memset(&fpu, 0, sizeof(fpu));
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;

    for (i = 0; i < 8; ++i) {
        fpu.ftw |= (!env->fptags[i]) << i;
    }

    memcpy(fpu.st_mm, env->fpregs, sizeof(env->fpregs));
    for (i = 0; i < 8; i++) {
        stq_p(&fpu.mmx_1[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.mmx_1[i][8], env->xmm_regs[i].ZMM_Q(1));
        if (CPU_NB_REGS > 8) {
            stq_p(&fpu.mmx_2[i][0], env->xmm_regs[i + 8].ZMM_Q(0));
            stq_p(&fpu.mmx_2[i][8], env->xmm_regs[i + 8].ZMM_Q(1));
        }
    }

    fpu.mxcsr = env->mxcsr;

    return hax_sync_fpu(env, &fpu, 1);
}

static int hax_arch_get_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 0);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_fpu(env);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_msrs(env);
    if (ret < 0) {
        return ret;
    }

    x86_update_hflags(env);
    return 0;
}

static int hax_arch_set_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 1);
    if (ret < 0) {
        fprintf(stderr, "Failed to sync vcpu reg\n");
        return ret;
    }
    ret = hax_set_fpu(env);
    if (ret < 0) {
        fprintf(stderr, "FPU failed\n");
        return ret;
    }
    ret = hax_set_msrs(env);
    if (ret < 0) {
        fprintf(stderr, "MSR failed\n");
        return ret;
    }

    return 0;
}

static void hax_vcpu_sync_state(CPUArchState *env, int modified)
{
    if (hax_enabled()) {
        if (modified) {
            hax_arch_set_registers(env);
        } else {
            hax_arch_get_registers(env);
        }
    }
}

/*
 * This is much simpler than KVM, at least in the first stage, because
 * we don't need to consider device pass-through or the framebuffer,
 * and we may even be able to remove the BIOS entirely.
 */
int hax_sync_vcpus(void)
{
    if (hax_enabled()) {
        CPUState *cpu;

        cpu = first_cpu;
        if (!cpu) {
            return 0;
        }

        for (; cpu != NULL; cpu = CPU_NEXT(cpu)) {
            int ret;

            ret = hax_arch_set_registers(cpu->env_ptr);
            if (ret < 0) {
                return ret;
            }
        }
    }

    return 0;
}

void hax_reset_vcpu_state(void *opaque)
{
    CPUState *cpu;
    for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
        cpu->hax_vcpu->tunnel->user_event_pending = 0;
        cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
    }
}

static void hax_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HAX";
    ac->init_machine = hax_accel_init;
    ac->allowed = &hax_allowed;
}

static const TypeInfo hax_accel_type = {
    .name = ACCEL_CLASS_NAME("hax"),
    .parent = TYPE_ACCEL,
    .class_init = hax_accel_class_init,
};

static void hax_type_init(void)
{
    type_register_static(&hax_accel_type);
}

type_init(hax_type_init);