4 * Copyright IBM, Corp. 2008
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Glauber Costa <gcosta@redhat.com>
11 * Copyright (c) 2011 Intel Corporation
13 * Jiang Yunhong<yunhong.jiang@intel.com>
14 * Xin Xiaohui<xiaohui.xin@intel.com>
15 * Zhang Xiantao<xiantao.zhang@intel.com>
17 * This work is licensed under the terms of the GNU GPL, version 2 or later.
18 * See the COPYING file in the top-level directory.
23 * HAX common code for both windows and darwin
26 #include "qemu/osdep.h"
28 #include "exec/address-spaces.h"
29 #include "exec/exec-all.h"
30 #include "exec/ioport.h"
32 #include "qemu-common.h"
35 #include "sysemu/accel.h"
36 #include "sysemu/sysemu.h"
37 #include "qemu/main-loop.h"
38 #include "hw/boards.h"
/* Debug tracing; compiled out unless DEBUG_HAX is enabled.
 * NOTE(review): reconstructed from mangled source — the #ifdef guard was
 * missing; verify against upstream hax-all.c. */
#ifdef DEBUG_HAX
#define DPRINTF(fmt, ...) \
    do { \
        fprintf(stdout, fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DPRINTF(fmt, ...) do { } while (0)
#endif
50 const uint32_t hax_cur_version
= 0x4; /* API v4: unmapping and MMIO moves */
51 /* Minimum HAX kernel version */
52 const uint32_t hax_min_version
= 0x4; /* API v4: supports unmapping */
54 static bool hax_allowed
;
56 struct hax_state hax_global
;
58 static void hax_vcpu_sync_state(CPUArchState
*env
, int modified
);
59 static int hax_arch_get_registers(CPUArchState
*env
);
66 int valid_hax_tunnel_size(uint16_t size
)
68 return size
>= sizeof(struct hax_tunnel
);
71 hax_fd
hax_vcpu_get_fd(CPUArchState
*env
)
73 struct hax_vcpu_state
*vcpu
= ENV_GET_CPU(env
)->hax_vcpu
;
75 return HAX_INVALID_FD
;
80 static int hax_get_capability(struct hax_state
*hax
)
83 struct hax_capabilityinfo capinfo
, *cap
= &capinfo
;
85 ret
= hax_capability(hax
, cap
);
90 if ((cap
->wstatus
& HAX_CAP_WORKSTATUS_MASK
) == HAX_CAP_STATUS_NOTWORKING
) {
91 if (cap
->winfo
& HAX_CAP_FAILREASON_VT
) {
93 ("VTX feature is not enabled, HAX driver will not work.\n");
94 } else if (cap
->winfo
& HAX_CAP_FAILREASON_NX
) {
96 ("NX feature is not enabled, HAX driver will not work.\n");
102 if (!(cap
->winfo
& HAX_CAP_UG
)) {
103 fprintf(stderr
, "UG mode is not supported by the hardware.\n");
107 if (cap
->wstatus
& HAX_CAP_MEMQUOTA
) {
108 if (cap
->mem_quota
< hax
->mem_quota
) {
109 fprintf(stderr
, "The VM memory needed exceeds the driver limit.\n");
116 static int hax_version_support(struct hax_state
*hax
)
119 struct hax_module_version version
;
121 ret
= hax_mod_version(hax
, &version
);
126 if (hax_min_version
> version
.cur_version
) {
127 fprintf(stderr
, "Incompatible HAX module version %d,",
128 version
.cur_version
);
129 fprintf(stderr
, "requires minimum version %d\n", hax_min_version
);
132 if (hax_cur_version
< version
.compat_version
) {
133 fprintf(stderr
, "Incompatible QEMU HAX API version %x,",
135 fprintf(stderr
, "requires minimum HAX API version %x\n",
136 version
.compat_version
);
143 int hax_vcpu_create(int id
)
145 struct hax_vcpu_state
*vcpu
= NULL
;
148 if (!hax_global
.vm
) {
149 fprintf(stderr
, "vcpu %x created failed, vm is null\n", id
);
153 if (hax_global
.vm
->vcpus
[id
]) {
154 fprintf(stderr
, "vcpu %x allocated already\n", id
);
158 vcpu
= g_malloc(sizeof(struct hax_vcpu_state
));
160 fprintf(stderr
, "Failed to alloc vcpu state\n");
164 memset(vcpu
, 0, sizeof(struct hax_vcpu_state
));
166 ret
= hax_host_create_vcpu(hax_global
.vm
->fd
, id
);
168 fprintf(stderr
, "Failed to create vcpu %x\n", id
);
173 vcpu
->fd
= hax_host_open_vcpu(hax_global
.vm
->id
, id
);
174 if (hax_invalid_fd(vcpu
->fd
)) {
175 fprintf(stderr
, "Failed to open the vcpu\n");
180 hax_global
.vm
->vcpus
[id
] = vcpu
;
182 ret
= hax_host_setup_vcpu_channel(vcpu
);
184 fprintf(stderr
, "Invalid hax tunnel size\n");
191 /* vcpu and tunnel will be closed automatically */
192 if (vcpu
&& !hax_invalid_fd(vcpu
->fd
)) {
193 hax_close_fd(vcpu
->fd
);
196 hax_global
.vm
->vcpus
[id
] = NULL
;
201 int hax_vcpu_destroy(CPUState
*cpu
)
203 struct hax_vcpu_state
*vcpu
= cpu
->hax_vcpu
;
205 if (!hax_global
.vm
) {
206 fprintf(stderr
, "vcpu %x destroy failed, vm is null\n", vcpu
->vcpu_id
);
215 * 1. The hax_tunnel is also destroied when vcpu destroy
216 * 2. close fd will cause hax module vcpu be cleaned
218 hax_close_fd(vcpu
->fd
);
219 hax_global
.vm
->vcpus
[vcpu
->vcpu_id
] = NULL
;
224 int hax_init_vcpu(CPUState
*cpu
)
228 ret
= hax_vcpu_create(cpu
->cpu_index
);
230 fprintf(stderr
, "Failed to create HAX vcpu\n");
234 cpu
->hax_vcpu
= hax_global
.vm
->vcpus
[cpu
->cpu_index
];
235 cpu
->hax_vcpu_dirty
= true;
236 qemu_register_reset(hax_reset_vcpu_state
, (CPUArchState
*) (cpu
->env_ptr
));
241 struct hax_vm
*hax_vm_create(struct hax_state
*hax
)
246 if (hax_invalid_fd(hax
->fd
)) {
254 vm
= g_malloc(sizeof(struct hax_vm
));
258 memset(vm
, 0, sizeof(struct hax_vm
));
259 ret
= hax_host_create_vm(hax
, &vm_id
);
261 fprintf(stderr
, "Failed to create vm %x\n", ret
);
265 vm
->fd
= hax_host_open_vm(hax
, vm_id
);
266 if (hax_invalid_fd(vm
->fd
)) {
267 fprintf(stderr
, "Failed to open vm %d\n", vm_id
);
280 int hax_vm_destroy(struct hax_vm
*vm
)
284 for (i
= 0; i
< HAX_MAX_VCPU
; i
++)
286 fprintf(stderr
, "VCPU should be cleaned before vm clean\n");
289 hax_close_fd(vm
->fd
);
291 hax_global
.vm
= NULL
;
295 static void hax_handle_interrupt(CPUState
*cpu
, int mask
)
297 cpu
->interrupt_request
|= mask
;
299 if (!qemu_cpu_is_self(cpu
)) {
304 static int hax_init(ram_addr_t ram_size
)
306 struct hax_state
*hax
= NULL
;
307 struct hax_qemu_version qversion
;
312 memset(hax
, 0, sizeof(struct hax_state
));
313 hax
->mem_quota
= ram_size
;
315 hax
->fd
= hax_mod_open();
316 if (hax_invalid_fd(hax
->fd
)) {
322 ret
= hax_get_capability(hax
);
325 if (ret
!= -ENOSPC
) {
331 if (!hax_version_support(hax
)) {
336 hax
->vm
= hax_vm_create(hax
);
338 fprintf(stderr
, "Failed to create HAX VM\n");
345 qversion
.cur_version
= hax_cur_version
;
346 qversion
.min_version
= hax_min_version
;
347 hax_notify_qemu_version(hax
->vm
->fd
, &qversion
);
348 cpu_interrupt_handler
= hax_handle_interrupt
;
353 hax_vm_destroy(hax
->vm
);
362 static int hax_accel_init(MachineState
*ms
)
364 int ret
= hax_init(ms
->ram_size
);
366 if (ret
&& (ret
!= -ENOSPC
)) {
367 fprintf(stderr
, "No accelerator found.\n");
369 fprintf(stdout
, "HAX is %s and emulator runs in %s mode.\n",
370 !ret
? "working" : "not working",
371 !ret
? "fast virt" : "emulation");
376 static int hax_handle_fastmmio(CPUArchState
*env
, struct hax_fastmmio
*hft
)
378 if (hft
->direction
< 2) {
379 cpu_physical_memory_rw(hft
->gpa
, (uint8_t *) &hft
->value
, hft
->size
,
383 * HAX API v4 supports transferring data between two MMIO addresses,
384 * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
385 * hft->direction == 2: gpa ==> gpa2
388 cpu_physical_memory_rw(hft
->gpa
, (uint8_t *) &value
, hft
->size
, 0);
389 cpu_physical_memory_rw(hft
->gpa2
, (uint8_t *) &value
, hft
->size
, 1);
395 static int hax_handle_io(CPUArchState
*env
, uint32_t df
, uint16_t port
,
396 int direction
, int size
, int count
, void *buffer
)
400 MemTxAttrs attrs
= { 0 };
403 ptr
= (uint8_t *) buffer
;
405 ptr
= buffer
+ size
* count
- size
;
407 for (i
= 0; i
< count
; i
++) {
408 address_space_rw(&address_space_io
, port
, attrs
,
409 ptr
, size
, direction
== HAX_EXIT_IO_OUT
);
420 static int hax_vcpu_interrupt(CPUArchState
*env
)
422 CPUState
*cpu
= ENV_GET_CPU(env
);
423 struct hax_vcpu_state
*vcpu
= cpu
->hax_vcpu
;
424 struct hax_tunnel
*ht
= vcpu
->tunnel
;
427 * Try to inject an interrupt if the guest can accept it
428 * Unlike KVM, HAX kernel check for the eflags, instead of qemu
430 if (ht
->ready_for_interrupt_injection
&&
431 (cpu
->interrupt_request
& CPU_INTERRUPT_HARD
)) {
434 irq
= cpu_get_pic_interrupt(env
);
436 hax_inject_interrupt(env
, irq
);
437 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
441 /* If we have an interrupt but the guest is not ready to receive an
442 * interrupt, request an interrupt window exit. This will
443 * cause a return to userspace as soon as the guest is ready to
444 * receive interrupts. */
445 if ((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
)) {
446 ht
->request_interrupt_window
= 1;
448 ht
->request_interrupt_window
= 0;
453 void hax_raise_event(CPUState
*cpu
)
455 struct hax_vcpu_state
*vcpu
= cpu
->hax_vcpu
;
460 vcpu
->tunnel
->user_event_pending
= 1;
464 * Ask hax kernel module to run the CPU for us till:
465 * 1. Guest crash or shutdown
466 * 2. Need QEMU's emulation like guest execute MMIO instruction
467 * 3. Guest execute HLT
468 * 4. QEMU have Signal/event pending
469 * 5. An unknown VMX exit happens
471 static int hax_vcpu_hax_exec(CPUArchState
*env
)
474 CPUState
*cpu
= ENV_GET_CPU(env
);
475 X86CPU
*x86_cpu
= X86_CPU(cpu
);
476 struct hax_vcpu_state
*vcpu
= cpu
->hax_vcpu
;
477 struct hax_tunnel
*ht
= vcpu
->tunnel
;
479 if (!hax_enabled()) {
480 DPRINTF("Trying to vcpu execute at eip:" TARGET_FMT_lx
"\n", env
->eip
);
486 if (cpu
->interrupt_request
& CPU_INTERRUPT_POLL
) {
487 cpu
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
488 apic_poll_irq(x86_cpu
->apic_state
);
491 if (cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) {
492 DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
494 do_cpu_init(x86_cpu
);
495 hax_vcpu_sync_state(env
, 1);
498 if (cpu
->interrupt_request
& CPU_INTERRUPT_SIPI
) {
499 DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
501 hax_vcpu_sync_state(env
, 0);
502 do_cpu_sipi(x86_cpu
);
503 hax_vcpu_sync_state(env
, 1);
509 if (cpu
->exit_request
) {
514 hax_vcpu_interrupt(env
);
516 qemu_mutex_unlock_iothread();
517 hax_ret
= hax_vcpu_run(vcpu
);
518 qemu_mutex_lock_iothread();
521 /* Simply continue the vcpu_run if system call interrupted */
522 if (hax_ret
== -EINTR
|| hax_ret
== -EAGAIN
) {
523 DPRINTF("io window interrupted\n");
528 fprintf(stderr
, "vcpu run failed for vcpu %x\n", vcpu
->vcpu_id
);
531 switch (ht
->_exit_status
) {
533 ret
= hax_handle_io(env
, ht
->pio
._df
, ht
->pio
._port
,
535 ht
->pio
._size
, ht
->pio
._count
, vcpu
->iobuf
);
537 case HAX_EXIT_FAST_MMIO
:
538 ret
= hax_handle_fastmmio(env
, (struct hax_fastmmio
*) vcpu
->iobuf
);
540 /* Guest state changed, currently only for shutdown */
541 case HAX_EXIT_STATECHANGE
:
542 fprintf(stdout
, "VCPU shutdown request\n");
543 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN
);
544 hax_vcpu_sync_state(env
, 0);
547 case HAX_EXIT_UNKNOWN_VMEXIT
:
548 fprintf(stderr
, "Unknown VMX exit %x from guest\n",
550 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET
);
551 hax_vcpu_sync_state(env
, 0);
552 cpu_dump_state(cpu
, stderr
, fprintf
, 0);
556 if (!(cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
557 !(cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
558 /* hlt instruction with interrupt disabled is shutdown */
559 env
->eflags
|= IF_MASK
;
561 cpu
->exception_index
= EXCP_HLT
;
565 /* these situations will continue to hax module */
566 case HAX_EXIT_INTERRUPT
:
567 case HAX_EXIT_PAUSED
:
570 /* Should not happen on UG system */
571 fprintf(stderr
, "HAX: unsupported MMIO emulation\n");
575 /* Should not happen on UG system */
576 fprintf(stderr
, "HAX: unimplemented real mode emulation\n");
580 fprintf(stderr
, "Unknown exit %x from HAX\n", ht
->_exit_status
);
581 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET
);
582 hax_vcpu_sync_state(env
, 0);
583 cpu_dump_state(cpu
, stderr
, fprintf
, 0);
589 if (cpu
->exit_request
) {
590 cpu
->exit_request
= 0;
591 cpu
->exception_index
= EXCP_INTERRUPT
;
596 static void do_hax_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
598 CPUArchState
*env
= cpu
->env_ptr
;
600 hax_arch_get_registers(env
);
601 cpu
->hax_vcpu_dirty
= true;
604 void hax_cpu_synchronize_state(CPUState
*cpu
)
606 if (!cpu
->hax_vcpu_dirty
) {
607 run_on_cpu(cpu
, do_hax_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
611 static void do_hax_cpu_synchronize_post_reset(CPUState
*cpu
,
614 CPUArchState
*env
= cpu
->env_ptr
;
616 hax_vcpu_sync_state(env
, 1);
617 cpu
->hax_vcpu_dirty
= false;
620 void hax_cpu_synchronize_post_reset(CPUState
*cpu
)
622 run_on_cpu(cpu
, do_hax_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
625 static void do_hax_cpu_synchronize_post_init(CPUState
*cpu
, run_on_cpu_data arg
)
627 CPUArchState
*env
= cpu
->env_ptr
;
629 hax_vcpu_sync_state(env
, 1);
630 cpu
->hax_vcpu_dirty
= false;
633 void hax_cpu_synchronize_post_init(CPUState
*cpu
)
635 run_on_cpu(cpu
, do_hax_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
638 static void do_hax_cpu_synchronize_pre_loadvm(CPUState
*cpu
, run_on_cpu_data arg
)
640 cpu
->hax_vcpu_dirty
= true;
643 void hax_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
645 run_on_cpu(cpu
, do_hax_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
648 int hax_smp_cpu_exec(CPUState
*cpu
)
650 CPUArchState
*env
= (CPUArchState
*) (cpu
->env_ptr
);
655 if (cpu
->exception_index
>= EXCP_INTERRUPT
) {
656 ret
= cpu
->exception_index
;
657 cpu
->exception_index
= -1;
661 fatal
= hax_vcpu_hax_exec(env
);
664 fprintf(stderr
, "Unsupported HAX vcpu return\n");
672 static void set_v8086_seg(struct segment_desc_t
*lhs
, const SegmentCache
*rhs
)
674 memset(lhs
, 0, sizeof(struct segment_desc_t
));
675 lhs
->selector
= rhs
->selector
;
676 lhs
->base
= rhs
->base
;
677 lhs
->limit
= rhs
->limit
;
681 lhs
->operand_size
= 0;
684 lhs
->granularity
= 0;
688 static void get_seg(SegmentCache
*lhs
, const struct segment_desc_t
*rhs
)
690 lhs
->selector
= rhs
->selector
;
691 lhs
->base
= rhs
->base
;
692 lhs
->limit
= rhs
->limit
;
693 lhs
->flags
= (rhs
->type
<< DESC_TYPE_SHIFT
)
694 | (rhs
->present
* DESC_P_MASK
)
695 | (rhs
->dpl
<< DESC_DPL_SHIFT
)
696 | (rhs
->operand_size
<< DESC_B_SHIFT
)
697 | (rhs
->desc
* DESC_S_MASK
)
698 | (rhs
->long_mode
<< DESC_L_SHIFT
)
699 | (rhs
->granularity
* DESC_G_MASK
) | (rhs
->available
* DESC_AVL_MASK
);
702 static void set_seg(struct segment_desc_t
*lhs
, const SegmentCache
*rhs
)
704 unsigned flags
= rhs
->flags
;
706 memset(lhs
, 0, sizeof(struct segment_desc_t
));
707 lhs
->selector
= rhs
->selector
;
708 lhs
->base
= rhs
->base
;
709 lhs
->limit
= rhs
->limit
;
710 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
711 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
712 lhs
->dpl
= rhs
->selector
& 3;
713 lhs
->operand_size
= (flags
>> DESC_B_SHIFT
) & 1;
714 lhs
->desc
= (flags
& DESC_S_MASK
) != 0;
715 lhs
->long_mode
= (flags
>> DESC_L_SHIFT
) & 1;
716 lhs
->granularity
= (flags
& DESC_G_MASK
) != 0;
717 lhs
->available
= (flags
& DESC_AVL_MASK
) != 0;
720 static void hax_getput_reg(uint64_t *hax_reg
, target_ulong
*qemu_reg
, int set
)
722 target_ulong reg
= *hax_reg
;
725 *hax_reg
= *qemu_reg
;
731 /* The sregs has been synced with HAX kernel already before this call */
732 static int hax_get_segments(CPUArchState
*env
, struct vcpu_state_t
*sregs
)
734 get_seg(&env
->segs
[R_CS
], &sregs
->_cs
);
735 get_seg(&env
->segs
[R_DS
], &sregs
->_ds
);
736 get_seg(&env
->segs
[R_ES
], &sregs
->_es
);
737 get_seg(&env
->segs
[R_FS
], &sregs
->_fs
);
738 get_seg(&env
->segs
[R_GS
], &sregs
->_gs
);
739 get_seg(&env
->segs
[R_SS
], &sregs
->_ss
);
741 get_seg(&env
->tr
, &sregs
->_tr
);
742 get_seg(&env
->ldt
, &sregs
->_ldt
);
743 env
->idt
.limit
= sregs
->_idt
.limit
;
744 env
->idt
.base
= sregs
->_idt
.base
;
745 env
->gdt
.limit
= sregs
->_gdt
.limit
;
746 env
->gdt
.base
= sregs
->_gdt
.base
;
750 static int hax_set_segments(CPUArchState
*env
, struct vcpu_state_t
*sregs
)
752 if ((env
->eflags
& VM_MASK
)) {
753 set_v8086_seg(&sregs
->_cs
, &env
->segs
[R_CS
]);
754 set_v8086_seg(&sregs
->_ds
, &env
->segs
[R_DS
]);
755 set_v8086_seg(&sregs
->_es
, &env
->segs
[R_ES
]);
756 set_v8086_seg(&sregs
->_fs
, &env
->segs
[R_FS
]);
757 set_v8086_seg(&sregs
->_gs
, &env
->segs
[R_GS
]);
758 set_v8086_seg(&sregs
->_ss
, &env
->segs
[R_SS
]);
760 set_seg(&sregs
->_cs
, &env
->segs
[R_CS
]);
761 set_seg(&sregs
->_ds
, &env
->segs
[R_DS
]);
762 set_seg(&sregs
->_es
, &env
->segs
[R_ES
]);
763 set_seg(&sregs
->_fs
, &env
->segs
[R_FS
]);
764 set_seg(&sregs
->_gs
, &env
->segs
[R_GS
]);
765 set_seg(&sregs
->_ss
, &env
->segs
[R_SS
]);
767 if (env
->cr
[0] & CR0_PE_MASK
) {
768 /* force ss cpl to cs cpl */
769 sregs
->_ss
.selector
= (sregs
->_ss
.selector
& ~3) |
770 (sregs
->_cs
.selector
& 3);
771 sregs
->_ss
.dpl
= sregs
->_ss
.selector
& 3;
775 set_seg(&sregs
->_tr
, &env
->tr
);
776 set_seg(&sregs
->_ldt
, &env
->ldt
);
777 sregs
->_idt
.limit
= env
->idt
.limit
;
778 sregs
->_idt
.base
= env
->idt
.base
;
779 sregs
->_gdt
.limit
= env
->gdt
.limit
;
780 sregs
->_gdt
.base
= env
->gdt
.base
;
785 * After get the state from the kernel module, some
786 * qemu emulator state need be updated also
788 static int hax_setup_qemu_emulator(CPUArchState
*env
)
791 #define HFLAG_COPY_MASK (~( \
792 HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
793 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
794 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
795 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK))
799 hflags
= (env
->segs
[R_CS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
800 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
801 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
802 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
803 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
804 hflags
|= (env
->cr
[4] & CR4_OSFXSR_MASK
) <<
805 (HF_OSFXSR_SHIFT
- CR4_OSFXSR_SHIFT
);
807 if (env
->efer
& MSR_EFER_LMA
) {
808 hflags
|= HF_LMA_MASK
;
811 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
812 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
814 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
815 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
816 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
817 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
818 if (!(env
->cr
[0] & CR0_PE_MASK
) ||
819 (env
->eflags
& VM_MASK
) || !(hflags
& HF_CS32_MASK
)) {
820 hflags
|= HF_ADDSEG_MASK
;
822 hflags
|= ((env
->segs
[R_DS
].base
|
823 env
->segs
[R_ES
].base
|
824 env
->segs
[R_SS
].base
) != 0) << HF_ADDSEG_SHIFT
;
828 hflags
&= ~HF_SMM_MASK
;
830 env
->hflags
= (env
->hflags
& HFLAG_COPY_MASK
) | hflags
;
834 static int hax_sync_vcpu_register(CPUArchState
*env
, int set
)
836 struct vcpu_state_t regs
;
838 memset(®s
, 0, sizeof(struct vcpu_state_t
));
841 ret
= hax_sync_vcpu_state(env
, ®s
, 0);
847 /* generic register */
848 hax_getput_reg(®s
._rax
, &env
->regs
[R_EAX
], set
);
849 hax_getput_reg(®s
._rbx
, &env
->regs
[R_EBX
], set
);
850 hax_getput_reg(®s
._rcx
, &env
->regs
[R_ECX
], set
);
851 hax_getput_reg(®s
._rdx
, &env
->regs
[R_EDX
], set
);
852 hax_getput_reg(®s
._rsi
, &env
->regs
[R_ESI
], set
);
853 hax_getput_reg(®s
._rdi
, &env
->regs
[R_EDI
], set
);
854 hax_getput_reg(®s
._rsp
, &env
->regs
[R_ESP
], set
);
855 hax_getput_reg(®s
._rbp
, &env
->regs
[R_EBP
], set
);
857 hax_getput_reg(®s
._r8
, &env
->regs
[8], set
);
858 hax_getput_reg(®s
._r9
, &env
->regs
[9], set
);
859 hax_getput_reg(®s
._r10
, &env
->regs
[10], set
);
860 hax_getput_reg(®s
._r11
, &env
->regs
[11], set
);
861 hax_getput_reg(®s
._r12
, &env
->regs
[12], set
);
862 hax_getput_reg(®s
._r13
, &env
->regs
[13], set
);
863 hax_getput_reg(®s
._r14
, &env
->regs
[14], set
);
864 hax_getput_reg(®s
._r15
, &env
->regs
[15], set
);
866 hax_getput_reg(®s
._rflags
, &env
->eflags
, set
);
867 hax_getput_reg(®s
._rip
, &env
->eip
, set
);
870 regs
._cr0
= env
->cr
[0];
871 regs
._cr2
= env
->cr
[2];
872 regs
._cr3
= env
->cr
[3];
873 regs
._cr4
= env
->cr
[4];
874 hax_set_segments(env
, ®s
);
876 env
->cr
[0] = regs
._cr0
;
877 env
->cr
[2] = regs
._cr2
;
878 env
->cr
[3] = regs
._cr3
;
879 env
->cr
[4] = regs
._cr4
;
880 hax_get_segments(env
, ®s
);
884 ret
= hax_sync_vcpu_state(env
, ®s
, 1);
890 hax_setup_qemu_emulator(env
);
895 static void hax_msr_entry_set(struct vmx_msr
*item
, uint32_t index
,
902 static int hax_get_msrs(CPUArchState
*env
)
904 struct hax_msr_data md
;
905 struct vmx_msr
*msrs
= md
.entries
;
909 msrs
[n
++].entry
= MSR_IA32_SYSENTER_CS
;
910 msrs
[n
++].entry
= MSR_IA32_SYSENTER_ESP
;
911 msrs
[n
++].entry
= MSR_IA32_SYSENTER_EIP
;
912 msrs
[n
++].entry
= MSR_IA32_TSC
;
914 msrs
[n
++].entry
= MSR_EFER
;
915 msrs
[n
++].entry
= MSR_STAR
;
916 msrs
[n
++].entry
= MSR_LSTAR
;
917 msrs
[n
++].entry
= MSR_CSTAR
;
918 msrs
[n
++].entry
= MSR_FMASK
;
919 msrs
[n
++].entry
= MSR_KERNELGSBASE
;
922 ret
= hax_sync_msr(env
, &md
, 0);
927 for (i
= 0; i
< md
.done
; i
++) {
928 switch (msrs
[i
].entry
) {
929 case MSR_IA32_SYSENTER_CS
:
930 env
->sysenter_cs
= msrs
[i
].value
;
932 case MSR_IA32_SYSENTER_ESP
:
933 env
->sysenter_esp
= msrs
[i
].value
;
935 case MSR_IA32_SYSENTER_EIP
:
936 env
->sysenter_eip
= msrs
[i
].value
;
939 env
->tsc
= msrs
[i
].value
;
943 env
->efer
= msrs
[i
].value
;
946 env
->star
= msrs
[i
].value
;
949 env
->lstar
= msrs
[i
].value
;
952 env
->cstar
= msrs
[i
].value
;
955 env
->fmask
= msrs
[i
].value
;
957 case MSR_KERNELGSBASE
:
958 env
->kernelgsbase
= msrs
[i
].value
;
967 static int hax_set_msrs(CPUArchState
*env
)
969 struct hax_msr_data md
;
970 struct vmx_msr
*msrs
;
974 memset(&md
, 0, sizeof(struct hax_msr_data
));
975 hax_msr_entry_set(&msrs
[n
++], MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
976 hax_msr_entry_set(&msrs
[n
++], MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
977 hax_msr_entry_set(&msrs
[n
++], MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
978 hax_msr_entry_set(&msrs
[n
++], MSR_IA32_TSC
, env
->tsc
);
980 hax_msr_entry_set(&msrs
[n
++], MSR_EFER
, env
->efer
);
981 hax_msr_entry_set(&msrs
[n
++], MSR_STAR
, env
->star
);
982 hax_msr_entry_set(&msrs
[n
++], MSR_LSTAR
, env
->lstar
);
983 hax_msr_entry_set(&msrs
[n
++], MSR_CSTAR
, env
->cstar
);
984 hax_msr_entry_set(&msrs
[n
++], MSR_FMASK
, env
->fmask
);
985 hax_msr_entry_set(&msrs
[n
++], MSR_KERNELGSBASE
, env
->kernelgsbase
);
990 return hax_sync_msr(env
, &md
, 1);
993 static int hax_get_fpu(CPUArchState
*env
)
995 struct fx_layout fpu
;
998 ret
= hax_sync_fpu(env
, &fpu
, 0);
1003 env
->fpstt
= (fpu
.fsw
>> 11) & 7;
1004 env
->fpus
= fpu
.fsw
;
1005 env
->fpuc
= fpu
.fcw
;
1006 for (i
= 0; i
< 8; ++i
) {
1007 env
->fptags
[i
] = !((fpu
.ftw
>> i
) & 1);
1009 memcpy(env
->fpregs
, fpu
.st_mm
, sizeof(env
->fpregs
));
1011 for (i
= 0; i
< 8; i
++) {
1012 env
->xmm_regs
[i
].ZMM_Q(0) = ldq_p(&fpu
.mmx_1
[i
][0]);
1013 env
->xmm_regs
[i
].ZMM_Q(1) = ldq_p(&fpu
.mmx_1
[i
][8]);
1014 if (CPU_NB_REGS
> 8) {
1015 env
->xmm_regs
[i
+ 8].ZMM_Q(0) = ldq_p(&fpu
.mmx_2
[i
][0]);
1016 env
->xmm_regs
[i
+ 8].ZMM_Q(1) = ldq_p(&fpu
.mmx_2
[i
][8]);
1019 env
->mxcsr
= fpu
.mxcsr
;
1024 static int hax_set_fpu(CPUArchState
*env
)
1026 struct fx_layout fpu
;
1029 memset(&fpu
, 0, sizeof(fpu
));
1030 fpu
.fsw
= env
->fpus
& ~(7 << 11);
1031 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
1032 fpu
.fcw
= env
->fpuc
;
1034 for (i
= 0; i
< 8; ++i
) {
1035 fpu
.ftw
|= (!env
->fptags
[i
]) << i
;
1038 memcpy(fpu
.st_mm
, env
->fpregs
, sizeof(env
->fpregs
));
1039 for (i
= 0; i
< 8; i
++) {
1040 stq_p(&fpu
.mmx_1
[i
][0], env
->xmm_regs
[i
].ZMM_Q(0));
1041 stq_p(&fpu
.mmx_1
[i
][8], env
->xmm_regs
[i
].ZMM_Q(1));
1042 if (CPU_NB_REGS
> 8) {
1043 stq_p(&fpu
.mmx_2
[i
][0], env
->xmm_regs
[i
+ 8].ZMM_Q(0));
1044 stq_p(&fpu
.mmx_2
[i
][8], env
->xmm_regs
[i
+ 8].ZMM_Q(1));
1048 fpu
.mxcsr
= env
->mxcsr
;
1050 return hax_sync_fpu(env
, &fpu
, 1);
1053 static int hax_arch_get_registers(CPUArchState
*env
)
1057 ret
= hax_sync_vcpu_register(env
, 0);
1062 ret
= hax_get_fpu(env
);
1067 ret
= hax_get_msrs(env
);
1075 static int hax_arch_set_registers(CPUArchState
*env
)
1078 ret
= hax_sync_vcpu_register(env
, 1);
1081 fprintf(stderr
, "Failed to sync vcpu reg\n");
1084 ret
= hax_set_fpu(env
);
1086 fprintf(stderr
, "FPU failed\n");
1089 ret
= hax_set_msrs(env
);
1091 fprintf(stderr
, "MSR failed\n");
1098 static void hax_vcpu_sync_state(CPUArchState
*env
, int modified
)
1100 if (hax_enabled()) {
1102 hax_arch_set_registers(env
);
1104 hax_arch_get_registers(env
);
1110 * much simpler than kvm, at least in first stage because:
1111 * We don't need consider the device pass-through, we don't need
1112 * consider the framebuffer, and we may even remove the bios at all
1114 int hax_sync_vcpus(void)
1116 if (hax_enabled()) {
1124 for (; cpu
!= NULL
; cpu
= CPU_NEXT(cpu
)) {
1127 ret
= hax_arch_set_registers(cpu
->env_ptr
);
1137 void hax_reset_vcpu_state(void *opaque
)
1140 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= CPU_NEXT(cpu
)) {
1141 cpu
->hax_vcpu
->tunnel
->user_event_pending
= 0;
1142 cpu
->hax_vcpu
->tunnel
->ready_for_interrupt_injection
= 0;
1146 static void hax_accel_class_init(ObjectClass
*oc
, void *data
)
1148 AccelClass
*ac
= ACCEL_CLASS(oc
);
1150 ac
->init_machine
= hax_accel_init
;
1151 ac
->allowed
= &hax_allowed
;
1154 static const TypeInfo hax_accel_type
= {
1155 .name
= ACCEL_CLASS_NAME("hax"),
1156 .parent
= TYPE_ACCEL
,
1157 .class_init
= hax_accel_class_init
,
1160 static void hax_type_init(void)
1162 type_register_static(&hax_accel_type
);
1165 type_init(hax_type_init
);