target/i386/hax-all.c

/*
 * QEMU HAX support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * Copyright (c) 2011 Intel Corporation
 *  Written by:
 *  Jiang Yunhong<yunhong.jiang@intel.com>
 *  Xin Xiaohui<xiaohui.xin@intel.com>
 *  Zhang Xiantao<xiantao.zhang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * HAX common code for both Windows and Darwin
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"

#include "qemu-common.h"
#include "strings.h"
#include "hax-i386.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"

#define DEBUG_HAX 0

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

/* Current version */
const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */
/* Minimum HAX kernel version */
const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */

static bool hax_allowed;

struct hax_state hax_global;

static void hax_vcpu_sync_state(CPUArchState *env, int modified);
static int hax_arch_get_registers(CPUArchState *env);

int hax_enabled(void)
{
    return hax_allowed;
}

int valid_hax_tunnel_size(uint16_t size)
{
    return size >= sizeof(struct hax_tunnel);
}

hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
    struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;
    if (!vcpu) {
        return HAX_INVALID_FD;
    }
    return vcpu->fd;
}

static int hax_get_capability(struct hax_state *hax)
{
    int ret;
    struct hax_capabilityinfo capinfo, *cap = &capinfo;

    ret = hax_capability(hax, cap);
    if (ret) {
        return ret;
    }

    if ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == HAX_CAP_STATUS_NOTWORKING) {
        if (cap->winfo & HAX_CAP_FAILREASON_VT) {
            DPRINTF("VT-x feature is not enabled, HAX driver will not work.\n");
        } else if (cap->winfo & HAX_CAP_FAILREASON_NX) {
            DPRINTF("NX feature is not enabled, HAX driver will not work.\n");
        }
        return -ENXIO;
    }

    if (!(cap->winfo & HAX_CAP_UG)) {
        fprintf(stderr, "UG mode is not supported by the hardware.\n");
        return -ENOTSUP;
    }

    if (cap->wstatus & HAX_CAP_MEMQUOTA) {
        if (cap->mem_quota < hax->mem_quota) {
            fprintf(stderr, "The VM memory needed exceeds the driver limit.\n");
            return -ENOSPC;
        }
    }
    return 0;
}

static int hax_version_support(struct hax_state *hax)
{
    int ret;
    struct hax_module_version version;

    ret = hax_mod_version(hax, &version);
    if (ret < 0) {
        return 0;
    }

    if (hax_min_version > version.cur_version) {
        fprintf(stderr, "Incompatible HAX module version %d, "
                "requires minimum version %d\n",
                version.cur_version, hax_min_version);
        return 0;
    }
    if (hax_cur_version < version.compat_version) {
        fprintf(stderr, "Incompatible QEMU HAX API version %x, "
                "requires minimum HAX API version %x\n",
                hax_cur_version, version.compat_version);
        return 0;
    }

    return 1;
}

int hax_vcpu_create(int id)
{
    struct hax_vcpu_state *vcpu = NULL;
    int ret;

    if (!hax_global.vm) {
        fprintf(stderr, "Failed to create vcpu %x, vm is null\n", id);
        return -1;
    }

    if (hax_global.vm->vcpus[id]) {
        fprintf(stderr, "vcpu %x allocated already\n", id);
        return 0;
    }

    /* g_new0 aborts on allocation failure, so no NULL check is needed */
    vcpu = g_new0(struct hax_vcpu_state, 1);

    ret = hax_host_create_vcpu(hax_global.vm->fd, id);
    if (ret) {
        fprintf(stderr, "Failed to create vcpu %x\n", id);
        goto error;
    }

    vcpu->vcpu_id = id;
    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
    if (hax_invalid_fd(vcpu->fd)) {
        fprintf(stderr, "Failed to open the vcpu\n");
        ret = -ENODEV;
        goto error;
    }

    hax_global.vm->vcpus[id] = vcpu;

    ret = hax_host_setup_vcpu_channel(vcpu);
    if (ret) {
        fprintf(stderr, "Invalid hax tunnel size\n");
        ret = -EINVAL;
        goto error;
    }
    return 0;

  error:
    /* vcpu and tunnel will be closed automatically */
    if (vcpu && !hax_invalid_fd(vcpu->fd)) {
        hax_close_fd(vcpu->fd);
    }

    hax_global.vm->vcpus[id] = NULL;
    g_free(vcpu);
    return -1;
}

int hax_vcpu_destroy(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    /* Check vcpu before dereferencing it in the error message below */
    if (!vcpu) {
        return 0;
    }

    if (!hax_global.vm) {
        fprintf(stderr, "Failed to destroy vcpu %x, vm is null\n",
                vcpu->vcpu_id);
        return -1;
    }

    /*
     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
     * 2. Closing the fd causes the HAX kernel module to clean up the vcpu
     */
    hax_close_fd(vcpu->fd);
    hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
    g_free(vcpu);
    return 0;
}

int hax_init_vcpu(CPUState *cpu)
{
    int ret;

    ret = hax_vcpu_create(cpu->cpu_index);
    if (ret < 0) {
        fprintf(stderr, "Failed to create HAX vcpu\n");
        exit(-1);
    }

    cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
    cpu->hax_vcpu_dirty = true;
    qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));

    return ret;
}

struct hax_vm *hax_vm_create(struct hax_state *hax)
{
    struct hax_vm *vm;
    int vm_id = 0, ret;

    if (hax_invalid_fd(hax->fd)) {
        return NULL;
    }

    if (hax->vm) {
        return hax->vm;
    }

    /* g_new0 aborts on allocation failure, so no NULL check is needed */
    vm = g_new0(struct hax_vm, 1);

    ret = hax_host_create_vm(hax, &vm_id);
    if (ret) {
        fprintf(stderr, "Failed to create vm %x\n", ret);
        goto error;
    }
    vm->id = vm_id;
    vm->fd = hax_host_open_vm(hax, vm_id);
    if (hax_invalid_fd(vm->fd)) {
        fprintf(stderr, "Failed to open vm %d\n", vm_id);
        goto error;
    }

    hax->vm = vm;
    return vm;

  error:
    g_free(vm);
    hax->vm = NULL;
    return NULL;
}

int hax_vm_destroy(struct hax_vm *vm)
{
    int i;

    for (i = 0; i < HAX_MAX_VCPU; i++) {
        if (vm->vcpus[i]) {
            fprintf(stderr, "VCPU should be cleaned before vm clean\n");
            return -1;
        }
    }
    hax_close_fd(vm->fd);
    g_free(vm);
    hax_global.vm = NULL;
    return 0;
}

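/*
 * Installed as cpu_interrupt_handler by hax_init(): record the pending
 * interrupt mask and, if the request comes from another thread, kick the
 * vcpu so it drops out of hax_vcpu_run() and notices it.
 */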
static void hax_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

static int hax_init(ram_addr_t ram_size)
{
    struct hax_state *hax = NULL;
    struct hax_qemu_version qversion;
    int ret;

    hax = &hax_global;

    memset(hax, 0, sizeof(struct hax_state));
    hax->mem_quota = ram_size;

    hax->fd = hax_mod_open();
    if (hax_invalid_fd(hax->fd)) {
        hax->fd = 0;
        ret = -ENODEV;
        goto error;
    }

    ret = hax_get_capability(hax);

    if (ret) {
        if (ret != -ENOSPC) {
            ret = -EINVAL;
        }
        goto error;
    }

    if (!hax_version_support(hax)) {
        ret = -EINVAL;
        goto error;
    }

    hax->vm = hax_vm_create(hax);
    if (!hax->vm) {
        fprintf(stderr, "Failed to create HAX VM\n");
        ret = -EINVAL;
        goto error;
    }

    hax_memory_init();

    qversion.cur_version = hax_cur_version;
    qversion.min_version = hax_min_version;
    hax_notify_qemu_version(hax->vm->fd, &qversion);
    cpu_interrupt_handler = hax_handle_interrupt;

    return ret;
  error:
    if (hax->vm) {
        hax_vm_destroy(hax->vm);
    }
    if (hax->fd) {
        hax_mod_close(hax);
    }

    return ret;
}

static int hax_accel_init(MachineState *ms)
{
    int ret = hax_init(ms->ram_size);

    if (ret && (ret != -ENOSPC)) {
        fprintf(stderr, "No accelerator found.\n");
    } else {
        fprintf(stdout, "HAX is %s and emulator runs in %s mode.\n",
                !ret ? "working" : "not working",
                !ret ? "fast virt" : "emulation");
    }
    return ret;
}

static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft)
{
    if (hft->direction < 2) {
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &hft->value, hft->size,
                               hft->direction);
    } else {
        /*
         * HAX API v4 supports transferring data between two MMIO addresses,
         * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
         *  hft->direction == 2: gpa ==> gpa2
         */
        uint64_t value;
        cpu_physical_memory_rw(hft->gpa, (uint8_t *) &value, hft->size, 0);
        cpu_physical_memory_rw(hft->gpa2, (uint8_t *) &value, hft->size, 1);
    }

    return 0;
}

static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
                         int direction, int size, int count, void *buffer)
{
    uint8_t *ptr;
    int i;
    MemTxAttrs attrs = { 0 };
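
    /*
     * df is the guest's direction flag (EFLAGS.DF) at the time of the
     * string I/O instruction: with DF clear the CPU steps forwards through
     * the buffer, with DF set it steps backwards, so start from the last
     * element in that case.
     */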

    if (!df) {
        ptr = (uint8_t *) buffer;
    } else {
        ptr = buffer + size * count - size;
    }
    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size, direction == HAX_EXIT_IO_OUT);
        if (!df) {
            ptr += size;
        } else {
            ptr -= size;
        }
    }

    return 0;
}

static int hax_vcpu_interrupt(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    /*
     * Try to inject an interrupt if the guest can accept it.
     * Unlike KVM, the HAX kernel module checks EFLAGS.IF itself,
     * rather than relying on QEMU to do so.
     */
    if (ht->ready_for_interrupt_injection &&
        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        int irq;

        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            hax_inject_interrupt(env, irq);
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
    }

    /*
     * If we have an interrupt but the guest is not ready to receive it,
     * request an interrupt window exit. This will cause a return to
     * userspace as soon as the guest is ready to receive interrupts.
     */
    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        ht->request_interrupt_window = 1;
    } else {
        ht->request_interrupt_window = 0;
    }
    return 0;
}

void hax_raise_event(CPUState *cpu)
{
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;

    if (!vcpu) {
        return;
    }
    vcpu->tunnel->user_event_pending = 1;
}

/*
 * Ask the HAX kernel module to run the CPU for us until one of the following
 * happens:
 * 1. Guest crash or shutdown
 * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO instruction
 * 3. Guest executes HLT
 * 4. QEMU has a signal/event pending
 * 5. An unknown VMX exit happens
 */
static int hax_vcpu_hax_exec(CPUArchState *env)
{
    int ret = 0;
    CPUState *cpu = ENV_GET_CPU(env);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    if (!hax_enabled()) {
        DPRINTF("Trying to execute vcpu at eip:" TARGET_FMT_lx "\n", env->eip);
        return 0;
    }

    cpu->halted = 0;

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
        DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
                cpu->cpu_index);
        do_cpu_init(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
                cpu->cpu_index);
        hax_vcpu_sync_state(env, 0);
        do_cpu_sipi(x86_cpu);
        hax_vcpu_sync_state(env, 1);
    }

    do {
        int hax_ret;

        if (cpu->exit_request) {
            ret = 1;
            break;
        }

        hax_vcpu_interrupt(env);

        qemu_mutex_unlock_iothread();
        hax_ret = hax_vcpu_run(vcpu);
        qemu_mutex_lock_iothread();
        current_cpu = cpu;

        /* Simply continue the vcpu_run if system call interrupted */
        if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
            DPRINTF("io window interrupted\n");
            continue;
        }

        if (hax_ret < 0) {
            fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
            abort();
        }
        switch (ht->_exit_status) {
        case HAX_EXIT_IO:
            ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
                                ht->pio._direction,
                                ht->pio._size, ht->pio._count, vcpu->iobuf);
            break;
        case HAX_EXIT_FAST_MMIO:
            ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf);
            break;
        /* Guest state changed, currently only for shutdown */
        case HAX_EXIT_STATECHANGE:
            fprintf(stdout, "VCPU shutdown request\n");
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
            hax_vcpu_sync_state(env, 0);
            ret = 1;
            break;
        case HAX_EXIT_UNKNOWN_VMEXIT:
            fprintf(stderr, "Unknown VMX exit %x from guest\n",
                    ht->_exit_reason);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = -1;
            break;
        case HAX_EXIT_HLT:
            if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
                /* hlt instruction with interrupt disabled is shutdown */
                env->eflags |= IF_MASK;
                cpu->halted = 1;
                cpu->exception_index = EXCP_HLT;
                ret = 1;
            }
            break;
        /* these situations will continue to hax module */
        case HAX_EXIT_INTERRUPT:
        case HAX_EXIT_PAUSED:
            break;
        case HAX_EXIT_MMIO:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unsupported MMIO emulation\n");
            ret = -1;
            break;
        case HAX_EXIT_REAL:
            /* Should not happen on UG system */
            fprintf(stderr, "HAX: unimplemented real mode emulation\n");
            ret = -1;
            break;
        default:
            fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            hax_vcpu_sync_state(env, 0);
            cpu_dump_state(cpu, stderr, fprintf, 0);
            ret = 1;
            break;
        }
    } while (!ret);

    if (cpu->exit_request) {
        cpu->exit_request = 0;
        cpu->exception_index = EXCP_INTERRUPT;
    }
    return ret < 0;
}

static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_arch_get_registers(env);
    cpu->hax_vcpu_dirty = true;
}

void hax_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->hax_vcpu_dirty) {
        run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
}

void hax_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
}

void hax_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    cpu->hax_vcpu_dirty = true;
}

void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

int hax_smp_cpu_exec(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
    int fatal;
    int ret;

    while (1) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = hax_vcpu_hax_exec(env);

        if (fatal) {
            fprintf(stderr, "Unsupported HAX vcpu return\n");
            abort();
        }
    }

    return ret;
}

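/*
 * Helpers for converting between QEMU's SegmentCache, which packs the x86
 * descriptor attributes into a single 'flags' word, and HAX's unpacked
 * struct segment_desc_t. set_v8086_seg() builds the fixed descriptor that
 * virtual-8086 mode requires: a present, DPL-3, 16-bit read/write data
 * segment (type 3) with byte granularity.
 */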
static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->operand_size = 0;
    lhs->desc = 1;
    lhs->long_mode = 0;
    lhs->granularity = 0;
    lhs->available = 0;
}

static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->operand_size << DESC_B_SHIFT)
        | (rhs->desc * DESC_S_MASK)
        | (rhs->long_mode << DESC_L_SHIFT)
        | (rhs->granularity * DESC_G_MASK) | (rhs->available * DESC_AVL_MASK);
}

static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    memset(lhs, 0, sizeof(struct segment_desc_t));
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
    lhs->desc = (flags & DESC_S_MASK) != 0;
    lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
    lhs->granularity = (flags & DESC_G_MASK) != 0;
    lhs->available = (flags & DESC_AVL_MASK) != 0;
}

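/*
 * Bidirectional copy of one general-purpose register: set != 0 copies the
 * QEMU value into the HAX register state, set == 0 copies the HAX value
 * back into QEMU. hax_sync_vcpu_register() uses this one helper for both
 * directions of the sync.
 */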
static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
{
    target_ulong reg = *hax_reg;

    if (set) {
        *hax_reg = *qemu_reg;
    } else {
        *qemu_reg = reg;
    }
}

/* The sregs have already been synced with the HAX kernel before this call */
static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    get_seg(&env->segs[R_CS], &sregs->_cs);
    get_seg(&env->segs[R_DS], &sregs->_ds);
    get_seg(&env->segs[R_ES], &sregs->_es);
    get_seg(&env->segs[R_FS], &sregs->_fs);
    get_seg(&env->segs[R_GS], &sregs->_gs);
    get_seg(&env->segs[R_SS], &sregs->_ss);

    get_seg(&env->tr, &sregs->_tr);
    get_seg(&env->ldt, &sregs->_ldt);
    env->idt.limit = sregs->_idt.limit;
    env->idt.base = sregs->_idt.base;
    env->gdt.limit = sregs->_gdt.limit;
    env->gdt.base = sregs->_gdt.base;
    return 0;
}

static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
        set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs->_cs, &env->segs[R_CS]);
        set_seg(&sregs->_ds, &env->segs[R_DS]);
        set_seg(&sregs->_es, &env->segs[R_ES]);
        set_seg(&sregs->_fs, &env->segs[R_FS]);
        set_seg(&sregs->_gs, &env->segs[R_GS]);
        set_seg(&sregs->_ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs->_ss.selector = (sregs->_ss.selector & ~3) |
                                  (sregs->_cs.selector & 3);
            sregs->_ss.dpl = sregs->_ss.selector & 3;
        }
    }

    set_seg(&sregs->_tr, &env->tr);
    set_seg(&sregs->_ldt, &env->ldt);
    sregs->_idt.limit = env->idt.limit;
    sregs->_idt.base = env->idt.base;
    sregs->_gdt.limit = env->gdt.limit;
    sregs->_gdt.base = env->gdt.base;
    return 0;
}

/*
 * After fetching state from the kernel module, some QEMU emulator state
 * needs to be updated as well.
 */
static int hax_setup_qemu_emulator(CPUArchState *env)
{

#define HFLAG_COPY_MASK (~( \
  HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
  HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
  HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
  HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK))
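
    /*
     * env->hflags caches frequently used CPU state (CPL, protected mode,
     * long mode, default operand size, ...) in one word. Everything outside
     * HFLAG_COPY_MASK is recomputed below from the segment descriptors,
     * control registers and EFER that were just read back from HAX.
     */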

    uint32_t hflags;

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
        (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
        (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
            (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
            (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }

    hflags &= ~HF_SMM_MASK;

    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
    return 0;
}

static int hax_sync_vcpu_register(CPUArchState *env, int set)
{
    struct vcpu_state_t regs;
    int ret;
    memset(&regs, 0, sizeof(struct vcpu_state_t));

    if (!set) {
        ret = hax_sync_vcpu_state(env, &regs, 0);
        if (ret < 0) {
            return -1;
        }
    }

    /* generic register */
    hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
    hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
    hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
    hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
    hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
    hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
    hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
    hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    hax_getput_reg(&regs._r8, &env->regs[8], set);
    hax_getput_reg(&regs._r9, &env->regs[9], set);
    hax_getput_reg(&regs._r10, &env->regs[10], set);
    hax_getput_reg(&regs._r11, &env->regs[11], set);
    hax_getput_reg(&regs._r12, &env->regs[12], set);
    hax_getput_reg(&regs._r13, &env->regs[13], set);
    hax_getput_reg(&regs._r14, &env->regs[14], set);
    hax_getput_reg(&regs._r15, &env->regs[15], set);
#endif
    hax_getput_reg(&regs._rflags, &env->eflags, set);
    hax_getput_reg(&regs._rip, &env->eip, set);

    if (set) {
        regs._cr0 = env->cr[0];
        regs._cr2 = env->cr[2];
        regs._cr3 = env->cr[3];
        regs._cr4 = env->cr[4];
        hax_set_segments(env, &regs);
    } else {
        env->cr[0] = regs._cr0;
        env->cr[2] = regs._cr2;
        env->cr[3] = regs._cr3;
        env->cr[4] = regs._cr4;
        hax_get_segments(env, &regs);
    }

    if (set) {
        ret = hax_sync_vcpu_state(env, &regs, 1);
        if (ret < 0) {
            return -1;
        }
    }
    if (!set) {
        hax_setup_qemu_emulator(env);
    }
    return 0;
}

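/*
 * MSR synchronization. hax_get_msrs() and hax_set_msrs() must keep the same
 * MSR list: the SYSENTER MSRs and the TSC always, plus the 64-bit syscall
 * and segment-base MSRs (EFER, STAR, LSTAR, CSTAR, FMASK, KERNELGSBASE) on
 * TARGET_X86_64 builds.
 */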
static void hax_msr_entry_set(struct vmx_msr *item, uint32_t index,
                              uint64_t value)
{
    item->entry = index;
    item->value = value;
}

static int hax_get_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs = md.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].entry = MSR_IA32_SYSENTER_CS;
    msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
    msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
    msrs[n++].entry = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    msrs[n++].entry = MSR_EFER;
    msrs[n++].entry = MSR_STAR;
    msrs[n++].entry = MSR_LSTAR;
    msrs[n++].entry = MSR_CSTAR;
    msrs[n++].entry = MSR_FMASK;
    msrs[n++].entry = MSR_KERNELGSBASE;
#endif
    md.nr_msr = n;
    ret = hax_sync_msr(env, &md, 0);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < md.done; i++) {
        switch (msrs[i].entry) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].value;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].value;
            break;
        case MSR_IA32_TSC:
            env->tsc = msrs[i].value;
            break;
#ifdef TARGET_X86_64
        case MSR_EFER:
            env->efer = msrs[i].value;
            break;
        case MSR_STAR:
            env->star = msrs[i].value;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].value;
            break;
        case MSR_CSTAR:
            env->cstar = msrs[i].value;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].value;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].value;
            break;
#endif
        }
    }

    return 0;
}

static int hax_set_msrs(CPUArchState *env)
{
    struct hax_msr_data md;
    struct vmx_msr *msrs = md.entries;
    int n = 0;

    memset(&md, 0, sizeof(struct hax_msr_data));
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    hax_msr_entry_set(&msrs[n++], MSR_EFER, env->efer);
    hax_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    hax_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    hax_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    hax_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    hax_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
#endif
    md.nr_msr = n;
    md.done = 0;

    return hax_sync_msr(env, &md, 1);
}

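/*
 * FPU/SSE state travels in struct fx_layout, which follows the FXSAVE image
 * layout: bits 11..13 of the status word hold the x87 top-of-stack pointer
 * (hence the shifts by 11 below), ftw is the abridged one-bit-per-register
 * tag word, and the XMM registers are split across the mmx_1/mmx_2 arrays.
 */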
static int hax_get_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i, ret;

    ret = hax_sync_fpu(env, &fpu, 0);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftw >> i) & 1);
    }
    memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));

    for (i = 0; i < 8; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.mmx_1[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.mmx_1[i][8]);
        if (CPU_NB_REGS > 8) {
            env->xmm_regs[i + 8].ZMM_Q(0) = ldq_p(&fpu.mmx_2[i][0]);
            env->xmm_regs[i + 8].ZMM_Q(1) = ldq_p(&fpu.mmx_2[i][8]);
        }
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int hax_set_fpu(CPUArchState *env)
{
    struct fx_layout fpu;
    int i;

    memset(&fpu, 0, sizeof(fpu));
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;

    for (i = 0; i < 8; ++i) {
        fpu.ftw |= (!env->fptags[i]) << i;
    }

    memcpy(fpu.st_mm, env->fpregs, sizeof(env->fpregs));
    for (i = 0; i < 8; i++) {
        stq_p(&fpu.mmx_1[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.mmx_1[i][8], env->xmm_regs[i].ZMM_Q(1));
        if (CPU_NB_REGS > 8) {
            stq_p(&fpu.mmx_2[i][0], env->xmm_regs[i + 8].ZMM_Q(0));
            stq_p(&fpu.mmx_2[i][8], env->xmm_regs[i + 8].ZMM_Q(1));
        }
    }

    fpu.mxcsr = env->mxcsr;

    return hax_sync_fpu(env, &fpu, 1);
}

static int hax_arch_get_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 0);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_fpu(env);
    if (ret < 0) {
        return ret;
    }

    ret = hax_get_msrs(env);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int hax_arch_set_registers(CPUArchState *env)
{
    int ret;

    ret = hax_sync_vcpu_register(env, 1);
    if (ret < 0) {
        fprintf(stderr, "Failed to sync vcpu registers\n");
        return ret;
    }
    ret = hax_set_fpu(env);
    if (ret < 0) {
        fprintf(stderr, "Failed to sync FPU state\n");
        return ret;
    }
    ret = hax_set_msrs(env);
    if (ret < 0) {
        fprintf(stderr, "Failed to sync MSRs\n");
        return ret;
    }

    return 0;
}

static void hax_vcpu_sync_state(CPUArchState *env, int modified)
{
    if (hax_enabled()) {
        if (modified) {
            hax_arch_set_registers(env);
        } else {
            hax_arch_get_registers(env);
        }
    }
}

/*
 * Much simpler than KVM, at least at this first stage: we need not consider
 * device pass-through or the framebuffer, and we may even be able to remove
 * the BIOS entirely.
 */
int hax_sync_vcpus(void)
{
    if (hax_enabled()) {
        CPUState *cpu;

        cpu = first_cpu;
        if (!cpu) {
            return 0;
        }

        for (; cpu != NULL; cpu = CPU_NEXT(cpu)) {
            int ret;

            ret = hax_arch_set_registers(cpu->env_ptr);
            if (ret < 0) {
                return ret;
            }
        }
    }

    return 0;
}

void hax_reset_vcpu_state(void *opaque)
{
    CPUState *cpu;
    for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
        cpu->hax_vcpu->tunnel->user_event_pending = 0;
        cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
    }
}

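/*
 * QOM boilerplate: register "hax" as an accelerator type, so that
 * "-accel hax" selects hax_accel_init() at machine initialization and
 * hax_allowed (queried through hax_enabled() above) is set accordingly.
 */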
static void hax_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HAX";
    ac->init_machine = hax_accel_init;
    ac->allowed = &hax_allowed;
}

static const TypeInfo hax_accel_type = {
    .name = ACCEL_CLASS_NAME("hax"),
    .parent = TYPE_ACCEL,
    .class_init = hax_accel_class_init,
};

static void hax_type_init(void)
{
    type_register_static(&hax_accel_type);
}

type_init(hax_type_init);