/*
 * QEMU HAX support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * Copyright (c) 2011 Intel Corporation
 *  Written by:
 *  Jiang Yunhong<yunhong.jiang@intel.com>
 *  Xin Xiaohui<xiaohui.xin@intel.com>
 *  Zhang Xiantao<xiantao.zhang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * HAX common code for both Windows and Darwin
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"

#include "qemu-common.h"
#include "hax-i386.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"

#define DEBUG_HAX 0

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

/* Current version */
const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */
/* Minimum HAX kernel version */
const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */

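/*
 * Version negotiation: hax_version_support() refuses to run when the kernel
 * module is older than hax_min_version, or when the module's compat_version
 * is newer than the API level this code was built against (hax_cur_version).
 * Both values are also reported to the driver via hax_notify_qemu_version()
 * in hax_init().
 */
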
static bool hax_allowed;

struct hax_state hax_global;

static void hax_vcpu_sync_state(CPUArchState *env, int modified);
static int hax_arch_get_registers(CPUArchState *env);

int hax_enabled(void)
{
    return hax_allowed;
}

int valid_hax_tunnel_size(uint16_t size)
{
    return size >= sizeof(struct hax_tunnel);
}

hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
    struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;

    if (!vcpu) {
        return HAX_INVALID_FD;
    }
    return vcpu->fd;
}

static int hax_get_capability(struct hax_state *hax)
{
    int ret;
    struct hax_capabilityinfo capinfo, *cap = &capinfo;

    ret = hax_capability(hax, cap);
    if (ret) {
        return ret;
    }

    if ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == HAX_CAP_STATUS_NOTWORKING) {
        if (cap->winfo & HAX_CAP_FAILREASON_VT) {
            DPRINTF("VTX feature is not enabled, HAX driver will not work.\n");
        } else if (cap->winfo & HAX_CAP_FAILREASON_NX) {
            DPRINTF("NX feature is not enabled, HAX driver will not work.\n");
        }
        return -ENXIO;
    }

    if (!(cap->winfo & HAX_CAP_UG)) {
        fprintf(stderr, "UG mode is not supported by the hardware.\n");
        return -ENOTSUP;
    }

    if (cap->wstatus & HAX_CAP_MEMQUOTA) {
        if (cap->mem_quota < hax->mem_quota) {
            fprintf(stderr, "The VM memory needed exceeds the driver limit.\n");
            return -ENOSPC;
        }
    }

    return 0;
}

static int hax_version_support(struct hax_state *hax)
{
    int ret;
    struct hax_module_version version;

    ret = hax_mod_version(hax, &version);
    if (ret < 0) {
        return 0;
    }

    if (hax_min_version > version.cur_version) {
        fprintf(stderr, "Incompatible HAX module version %d,",
                version.cur_version);
        fprintf(stderr, "requires minimum version %d\n", hax_min_version);
        return 0;
    }
    if (hax_cur_version < version.compat_version) {
        fprintf(stderr, "Incompatible QEMU HAX API version %x,",
                hax_cur_version);
        fprintf(stderr, "requires minimum HAX API version %x\n",
                version.compat_version);
        return 0;
    }

    return 1;
}

int hax_vcpu_create(int id)
{
    struct hax_vcpu_state *vcpu = NULL;
    int ret;

    if (!hax_global.vm) {
        fprintf(stderr, "vcpu %x creation failed, vm is null\n", id);
        return -1;
    }

    if (hax_global.vm->vcpus[id]) {
        fprintf(stderr, "vcpu %x allocated already\n", id);
        return 0;
    }

    vcpu = g_malloc(sizeof(struct hax_vcpu_state));
    if (!vcpu) {
        fprintf(stderr, "Failed to alloc vcpu state\n");
        return -ENOMEM;
    }

    memset(vcpu, 0, sizeof(struct hax_vcpu_state));

    ret = hax_host_create_vcpu(hax_global.vm->fd, id);
    if (ret) {
        fprintf(stderr, "Failed to create vcpu %x\n", id);
        goto error;
    }

    vcpu->vcpu_id = id;
    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
    if (hax_invalid_fd(vcpu->fd)) {
        fprintf(stderr, "Failed to open the vcpu\n");
        ret = -ENODEV;
        goto error;
    }

    hax_global.vm->vcpus[id] = vcpu;

    ret = hax_host_setup_vcpu_channel(vcpu);
    if (ret) {
        fprintf(stderr, "Invalid hax tunnel size\n");
        ret = -EINVAL;
        goto error;
    }
    return 0;

  error:
    /* vcpu and tunnel will be closed automatically */
    if (vcpu && !hax_invalid_fd(vcpu->fd)) {
        hax_close_fd(vcpu->fd);
    }

    hax_global.vm->vcpus[id] = NULL;
    g_free(vcpu);
    return -1;
}

int hax_vcpu_destroy(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    if (!hax_global.vm) {
        fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
        return -1;
    }

    if (!vcpu) {
        return 0;
    }

    /*
     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
     * 2. Closing the fd makes the hax module clean up the vcpu
     */
    hax_close_fd(vcpu->fd);
    hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
    g_free(vcpu);
    return 0;
}

int hax_init_vcpu(CPUState *cpu)
{
    int ret;

    ret = hax_vcpu_create(cpu->cpu_index);
    if (ret < 0) {
        fprintf(stderr, "Failed to create HAX vcpu\n");
        exit(-1);
    }

    cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
    cpu->hax_vcpu_dirty = true;
    qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));

    return ret;
}

struct hax_vm *hax_vm_create(struct hax_state *hax)
{
    struct hax_vm *vm;
    int vm_id = 0, ret;

    if (hax_invalid_fd(hax->fd)) {
        return NULL;
    }

    if (hax->vm) {
        return hax->vm;
    }

    vm = g_malloc(sizeof(struct hax_vm));
    if (!vm) {
        return NULL;
    }
    memset(vm, 0, sizeof(struct hax_vm));

    ret = hax_host_create_vm(hax, &vm_id);
    if (ret) {
        fprintf(stderr, "Failed to create vm %x\n", ret);
        goto error;
    }
    vm->id = vm_id;
    vm->fd = hax_host_open_vm(hax, vm_id);
    if (hax_invalid_fd(vm->fd)) {
        fprintf(stderr, "Failed to open vm %d\n", vm_id);
        goto error;
    }

    hax->vm = vm;
    return vm;

  error:
    g_free(vm);
    hax->vm = NULL;
    return NULL;
}

int hax_vm_destroy(struct hax_vm *vm)
{
    int i;

    for (i = 0; i < HAX_MAX_VCPU; i++) {
        if (vm->vcpus[i]) {
            fprintf(stderr, "VCPU should be cleaned before vm clean\n");
            return -1;
        }
    }
    hax_close_fd(vm->fd);
    g_free(vm);
    hax_global.vm = NULL;
    return 0;
}

static void hax_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

static int hax_init(ram_addr_t ram_size)
{
    struct hax_state *hax = NULL;
    struct hax_qemu_version qversion;
    int ret;

    hax = &hax_global;

    memset(hax, 0, sizeof(struct hax_state));
    hax->mem_quota = ram_size;

    hax->fd = hax_mod_open();
    if (hax_invalid_fd(hax->fd)) {
        hax->fd = 0;
        ret = -ENODEV;
        goto error;
    }

    ret = hax_get_capability(hax);
    if (ret) {
        if (ret != -ENOSPC) {
            ret = -EINVAL;
        }
        goto error;
    }

    if (!hax_version_support(hax)) {
        ret = -EINVAL;
        goto error;
    }

    hax->vm = hax_vm_create(hax);
    if (!hax->vm) {
        fprintf(stderr, "Failed to create HAX VM\n");
        ret = -EINVAL;
        goto error;
    }

    qversion.cur_version = hax_cur_version;
    qversion.min_version = hax_min_version;
    hax_notify_qemu_version(hax->vm->fd, &qversion);
    cpu_interrupt_handler = hax_handle_interrupt;

    return ret;

  error:
    if (hax->vm) {
        hax_vm_destroy(hax->vm);
    }
    return ret;
}

static int hax_accel_init(MachineState *ms)
{
    int ret = hax_init(ms->ram_size);

    if (ret && (ret != -ENOSPC)) {
        fprintf(stderr, "No accelerator found.\n");
    } else {
        fprintf(stdout, "HAX is %s and emulator runs in %s mode.\n",
                !ret ? "working" : "not working",
                !ret ? "fast virt" : "emulation");
    }
    return ret;
}

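/*
 * Handle a fast MMIO exit.  The kernel module hands QEMU a struct
 * hax_fastmmio in vcpu->iobuf: for directions 0 and 1, hft->size bytes are
 * transferred between hft->gpa and hft->value (hft->direction is passed to
 * cpu_physical_memory_rw() as the is_write flag); direction 2 (API v4)
 * copies hft->size bytes from hft->gpa to hft->gpa2, as needed by
 * instructions such as MOVS that touch two MMIO addresses.
 */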
static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft)
{
    if (hft->direction < 2) {
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &hft->value, hft->size,
                               hft->direction);
    } else {
        /*
         * HAX API v4 supports transferring data between two MMIO addresses,
         * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
         *  hft->direction == 2: gpa ==> gpa2
         */
        uint64_t value;
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &value, hft->size, 0);
        cpu_physical_memory_rw(hft->gpa2, (uint8_t *) &value, hft->size, 1);
    }

    return 0;
}

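/*
 * Handle a port I/O exit.  The kernel passes a buffer holding 'count'
 * items of 'size' bytes; 'df' carries the guest's direction flag, so the
 * buffer is walked backwards when DF is set, matching REP INS/OUTS
 * semantics.
 */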
static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
                         int direction, int size, int count, void *buffer)
{
    uint8_t *ptr;
    int i;
    MemTxAttrs attrs = { 0 };

    if (!df) {
        ptr = (uint8_t *) buffer;
    } else {
        ptr = buffer + size * count - size;
    }
    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size, direction == HAX_EXIT_IO_OUT);
        if (!df) {
            ptr += size;
        } else {
            ptr -= size;
        }
    }

    return 0;
}

static int hax_vcpu_interrupt(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    /*
     * Try to inject an interrupt if the guest can accept it.
     * Unlike KVM, the HAX kernel module checks the guest eflags itself,
     * so QEMU does not have to.
     */
    if (ht->ready_for_interrupt_injection &&
        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        int irq;

        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            hax_inject_interrupt(env, irq);
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
    }

    /*
     * If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts.
     */
    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        ht->request_interrupt_window = 1;
    } else {
        ht->request_interrupt_window = 0;
    }
    return 0;
}

void hax_raise_event(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    if (!vcpu) {
        return;
    }
    vcpu->tunnel->user_event_pending = 1;
}

/*
 * Ask the HAX kernel module to run the CPU for us until one of the
 * following happens:
 * 1. The guest crashes or shuts down
 * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO
 *    instruction
 * 3. The guest executes HLT
 * 4. QEMU has a signal/event pending
 * 5. An unknown VMX exit happens
 */
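/*
 * The loop below keeps re-entering hax_vcpu_run(): the iothread lock is
 * dropped around the ioctl, pending interrupts are injected (or an
 * interrupt window is requested) before each entry, and control only
 * returns to the caller once an exit needs QEMU's attention or
 * cpu->exit_request is set.
 */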
static int hax_vcpu_hax_exec(CPUArchState *env)
{
    int ret = 0;
    CPUState *cpu = ENV_GET_CPU(env);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    if (!hax_enabled()) {
        DPRINTF("Trying to execute vcpu at eip:" TARGET_FMT_lx "\n", env->eip);
        return 0;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
        DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
                cpu->cpu_index);
        do_cpu_init(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
                cpu->cpu_index);
        hax_vcpu_sync_state(env, 0);
        do_cpu_sipi(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    do {
        int hax_ret;

        if (cpu->exit_request) {
            ret = 1;
            break;
        }

        hax_vcpu_interrupt(env);

        qemu_mutex_unlock_iothread();
        hax_ret = hax_vcpu_run(vcpu);
        qemu_mutex_lock_iothread();

        /* Simply continue the vcpu_run if the system call was interrupted */
        if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
            DPRINTF("io window interrupted\n");
            continue;
        }

        if (hax_ret < 0) {
            fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
            abort();
        }

        switch (ht->_exit_status) {
        case HAX_EXIT_IO:
            ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
                                ht->pio._direction,
                                ht->pio._size, ht->pio._count, vcpu->iobuf);
            break;
        case HAX_EXIT_FAST_MMIO:
            ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf);
            break;
        /* Guest state changed, currently only for shutdown */
        case HAX_EXIT_STATECHANGE:
            fprintf(stdout, "VCPU shutdown request\n");
            qemu_system_shutdown_request();
            hax_vcpu_sync_state(env, 0);
            ret = 1;
            break;
        case HAX_EXIT_UNKNOWN_VMEXIT:
            fprintf(stderr, "Unknown VMX exit %x from guest\n",
                    ht->_exit_reason);
            qemu_system_reset_request();
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = -1;
            break;
        case HAX_EXIT_HLT:
            if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
                /* hlt instruction with interrupt disabled is shutdown */
                env->eflags |= IF_MASK;
                cpu->halted = 1;
                cpu->exception_index = EXCP_HLT;
                ret = 1;
            }
            break;
        /* these situations will continue to the hax module */
        case HAX_EXIT_INTERRUPT:
        case HAX_EXIT_PAUSED:
            break;
        case HAX_EXIT_MMIO:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unsupported MMIO emulation\n");
            ret = -1;
            break;
        case HAX_EXIT_REAL:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unimplemented real mode emulation\n");
            ret = -1;
            break;
        default:
            fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status);
            qemu_system_reset_request();
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = 1;
            break;
        }
    } while (!ret);

    if (cpu->exit_request) {
        cpu->exit_request = 0;
        cpu->exception_index = EXCP_INTERRUPT;
    }
    return ret < 0;
}

static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_arch_get_registers(env);
    cpu->hax_vcpu_dirty = true;
}

void hax_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->hax_vcpu_dirty) {
        run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
}

void hax_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
}

void hax_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

int hax_smp_cpu_exec(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
    int fatal;
    int ret;

    while (1) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = hax_vcpu_hax_exec(env);

        if (fatal) {
            fprintf(stderr, "Unsupported HAX vcpu return\n");
            abort();
        }
    }

    return ret;
}

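/*
 * The helpers below translate between QEMU's SegmentCache, which keeps the
 * descriptor attributes packed in a single 'flags' word, and HAX's
 * segment_desc_t, which exposes them as individual bit-fields (type, dpl,
 * present, granularity, ...).  set_v8086_seg() installs the fixed
 * attributes that virtual-8086 mode segments require.
 */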
static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->operand_size = 0;
    lhs->desc = 1;
    lhs->long_mode = 0;
    lhs->granularity = 0;
    lhs->available = 0;
}

static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->operand_size << DESC_B_SHIFT)
        | (rhs->desc * DESC_S_MASK)
        | (rhs->long_mode << DESC_L_SHIFT)
        | (rhs->granularity * DESC_G_MASK)
        | (rhs->available * DESC_AVL_MASK);
}

static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
    lhs->desc = (flags & DESC_S_MASK) != 0;
    lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
    lhs->granularity = (flags & DESC_G_MASK) != 0;
    lhs->available = (flags & DESC_AVL_MASK) != 0;
}

static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
{
    target_ulong reg = *hax_reg;

    if (set) {
        *hax_reg = *qemu_reg;
    } else {
        *qemu_reg = reg;
    }
}

/* The sregs have already been synced with the HAX kernel before this call */
static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    get_seg(&env->segs[R_CS], &sregs->_cs);
    get_seg(&env->segs[R_DS], &sregs->_ds);
    get_seg(&env->segs[R_ES], &sregs->_es);
    get_seg(&env->segs[R_FS], &sregs->_fs);
    get_seg(&env->segs[R_GS], &sregs->_gs);
    get_seg(&env->segs[R_SS], &sregs->_ss);

    get_seg(&env->tr, &sregs->_tr);
    get_seg(&env->ldt, &sregs->_ldt);
    env->idt.limit = sregs->_idt.limit;
    env->idt.base = sregs->_idt.base;
    env->gdt.limit = sregs->_gdt.limit;
    env->gdt.base = sregs->_gdt.base;
    return 0;
}

static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
        set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs->_cs, &env->segs[R_CS]);
        set_seg(&sregs->_ds, &env->segs[R_DS]);
        set_seg(&sregs->_es, &env->segs[R_ES]);
        set_seg(&sregs->_fs, &env->segs[R_FS]);
        set_seg(&sregs->_gs, &env->segs[R_GS]);
        set_seg(&sregs->_ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs->_ss.selector = (sregs->_ss.selector & ~3) |
                                  (sregs->_cs.selector & 3);
            sregs->_ss.dpl = sregs->_ss.selector & 3;
        }
    }

    set_seg(&sregs->_tr, &env->tr);
    set_seg(&sregs->_ldt, &env->ldt);
    sregs->_idt.limit = env->idt.limit;
    sregs->_idt.base = env->idt.base;
    sregs->_gdt.limit = env->gdt.limit;
    sregs->_gdt.base = env->gdt.base;
    return 0;
}

/*
 * After getting state from the kernel module, some QEMU emulator state
 * needs to be updated as well.
 */
static int hax_setup_qemu_emulator(CPUArchState *env)
{
    int hflags;

#define HFLAG_COPY_MASK (~( \
  HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
  HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
  HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
  HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK))

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
        (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
        (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
            (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
            (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }

    hflags &= ~HF_SMM_MASK;

    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
    return 0;
}

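/*
 * Copy the complete register state between QEMU and the HAX kernel module.
 * 'set' selects the direction: 0 pulls the state from the kernel into env
 * (and then refreshes env->hflags via hax_setup_qemu_emulator()), 1 pushes
 * the QEMU state into the kernel.
 */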
static int hax_sync_vcpu_register(CPUArchState *env, int set)
{
    struct vcpu_state_t regs;
    int ret;

    memset(&regs, 0, sizeof(struct vcpu_state_t));

    if (!set) {
        ret = hax_sync_vcpu_state(env, &regs, 0);
        if (ret < 0) {
            return -1;
        }
    }

    /* generic registers */
    hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
    hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
    hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
    hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
    hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
    hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
    hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
    hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    hax_getput_reg(&regs._r8, &env->regs[8], set);
    hax_getput_reg(&regs._r9, &env->regs[9], set);
    hax_getput_reg(&regs._r10, &env->regs[10], set);
    hax_getput_reg(&regs._r11, &env->regs[11], set);
    hax_getput_reg(&regs._r12, &env->regs[12], set);
    hax_getput_reg(&regs._r13, &env->regs[13], set);
    hax_getput_reg(&regs._r14, &env->regs[14], set);
    hax_getput_reg(&regs._r15, &env->regs[15], set);
#endif
    hax_getput_reg(&regs._rflags, &env->eflags, set);
    hax_getput_reg(&regs._rip, &env->eip, set);

    if (set) {
        regs._cr0 = env->cr[0];
        regs._cr2 = env->cr[2];
        regs._cr3 = env->cr[3];
        regs._cr4 = env->cr[4];
        hax_set_segments(env, &regs);
    } else {
        env->cr[0] = regs._cr0;
        env->cr[2] = regs._cr2;
        env->cr[3] = regs._cr3;
        env->cr[4] = regs._cr4;
        hax_get_segments(env, &regs);
    }

    if (set) {
        ret = hax_sync_vcpu_state(env, &regs, 1);
        if (ret < 0) {
            return -1;
        }
    }
    if (!set) {
        hax_setup_qemu_emulator(env);
    }
    return 0;
}

static void hax_msr_entry_set(struct vmx_msr *item, uint32_t index,
                              uint64_t value)
{
    item->entry = index;
    item->value = value;
}

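/*
 * Only a fixed set of MSRs is kept in sync with the kernel module: the
 * SYSENTER family, the TSC and, on 64-bit targets, EFER, STAR/LSTAR/CSTAR,
 * FMASK and KERNELGSBASE.
 */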
static int hax_get_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs = md.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].entry = MSR_IA32_SYSENTER_CS;
    msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
    msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
    msrs[n++].entry = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    msrs[n++].entry = MSR_EFER;
    msrs[n++].entry = MSR_STAR;
    msrs[n++].entry = MSR_LSTAR;
    msrs[n++].entry = MSR_CSTAR;
    msrs[n++].entry = MSR_FMASK;
    msrs[n++].entry = MSR_KERNELGSBASE;
#endif
    md.nr_msr = n;
    ret = hax_sync_msr(env, &md, 0);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < md.done; i++) {
        switch (msrs[i].entry) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].value;
            break;
        case MSR_IA32_TSC:
            env->tsc = msrs[i].value;
            break;
#ifdef TARGET_X86_64
        case MSR_EFER:
            env->efer = msrs[i].value;
            break;
        case MSR_STAR:
            env->star = msrs[i].value;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].value;
            break;
        case MSR_CSTAR:
            env->cstar = msrs[i].value;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].value;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].value;
            break;
#endif
        }
    }

    return 0;
}

static int hax_set_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs;
    int n = 0;

    msrs = md.entries;

    memset(&md, 0, sizeof(struct hax_msr_data));
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    hax_msr_entry_set(&msrs[n++], MSR_EFER, env->efer);
    hax_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    hax_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    hax_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    hax_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    hax_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
#endif

    md.nr_msr = n;
    md.done = 0;

    return hax_sync_msr(env, &md, 1);
}

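/*
 * The FPU/SSE state travels in an FXSAVE-style fx_layout image.  Note the
 * tag-word conversion: QEMU keeps one "register is empty" flag per x87
 * register in env->fptags[], while fx_layout.ftw uses the abridged FXSAVE
 * encoding where a set bit means "valid", hence the inversion in both
 * directions below.
 */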
static int hax_get_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i, ret;

    ret = hax_sync_fpu(env, &fpu, 0);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftw >> i) & 1);
    }
    memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));

    for (i = 0; i < 8; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.mmx_1[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.mmx_1[i][8]);
        if (CPU_NB_REGS > 8) {
            env->xmm_regs[i + 8].ZMM_Q(0) = ldq_p(&fpu.mmx_2[i][0]);
            env->xmm_regs[i + 8].ZMM_Q(1) = ldq_p(&fpu.mmx_2[i][8]);
        }
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int hax_set_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i;

    memset(&fpu, 0, sizeof(fpu));
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;

    for (i = 0; i < 8; ++i) {
        fpu.ftw |= (!env->fptags[i]) << i;
    }

    memcpy(fpu.st_mm, env->fpregs, sizeof(env->fpregs));
    for (i = 0; i < 8; i++) {
        stq_p(&fpu.mmx_1[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.mmx_1[i][8], env->xmm_regs[i].ZMM_Q(1));
        if (CPU_NB_REGS > 8) {
            stq_p(&fpu.mmx_2[i][0], env->xmm_regs[i + 8].ZMM_Q(0));
            stq_p(&fpu.mmx_2[i][8], env->xmm_regs[i + 8].ZMM_Q(1));
        }
    }

    fpu.mxcsr = env->mxcsr;

    return hax_sync_fpu(env, &fpu, 1);
}

static int hax_arch_get_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 0);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_fpu(env);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_msrs(env);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int hax_arch_set_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 1);
    if (ret < 0) {
        fprintf(stderr, "Failed to sync vcpu reg\n");
        return ret;
    }
    ret = hax_set_fpu(env);
    if (ret < 0) {
        fprintf(stderr, "FPU failed\n");
        return ret;
    }
    ret = hax_set_msrs(env);
    if (ret < 0) {
        fprintf(stderr, "MSR failed\n");
        return ret;
    }

    return 0;
}

static void hax_vcpu_sync_state(CPUArchState *env, int modified)
{
    if (hax_enabled()) {
        if (modified) {
            hax_arch_set_registers(env);
        } else {
            hax_arch_get_registers(env);
        }
    }
}

/*
 * This is much simpler than KVM, at least in the first stage: we do not
 * need to consider device pass-through or the framebuffer, and we may even
 * be able to drop the BIOS entirely.
 */
int hax_sync_vcpus(void)
{
    if (hax_enabled()) {
        CPUState *cpu;

        cpu = first_cpu;
        if (!cpu) {
            return 0;
        }

        for (; cpu != NULL; cpu = CPU_NEXT(cpu)) {
            int ret;

            ret = hax_arch_set_registers(cpu->env_ptr);
            if (ret < 0) {
                return ret;
            }
        }
    }

    return 0;
}

void hax_reset_vcpu_state(void *opaque)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
        cpu->hax_vcpu->tunnel->user_event_pending = 0;
        cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
    }
}

static void hax_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->init_machine = hax_accel_init;
    ac->allowed = &hax_allowed;
}

static const TypeInfo hax_accel_type = {
    .name = ACCEL_CLASS_NAME("hax"),
    .parent = TYPE_ACCEL,
    .class_init = hax_accel_class_init,
};

static void hax_type_init(void)
{
    type_register_static(&hax_accel_type);
}

type_init(hax_type_init);