/*
 * Copyright (c) 2011 Intel Corporation
 *
 *  Jiang Yunhong<yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/* HAX module interface - darwin version */
#include "qemu/osdep.h"
#include <sys/types.h>
#include <sys/ioctl.h>

#include "target/i386/hax-i386.h"
#include "sysemu/cpus.h"

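/* Open the HAX device node so QEMU can talk to the kernel module. */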
hax_fd hax_mod_open(void)
{
    int fd = open("/dev/HAX", O_RDWR);
    if (fd == -1) {
        fprintf(stderr, "Failed to open the hax module\n");
    }

    qemu_set_cloexec(fd);

    return fd;
}

int hax_populate_ram(uint64_t va, uint64_t size)
{
    int ret;

    if (!hax_global.vm || !hax_global.vm->fd) {
        fprintf(stderr, "Allocate memory before vm create?\n");
        return -EINVAL;
    }

    if (hax_global.supports_64bit_ramblock) {
        struct hax_ramblock_info ramblock = {
            .start_va = va,
            .size = size,
            .reserved = 0
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
    } else {
        struct hax_alloc_ram_info info = {
            .size = (uint32_t)size,
            .pad = 0,
            .va = va
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
    }
    if (ret < 0) {
        fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
                ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
                hax_global.supports_64bit_ramblock ? "new" : "legacy");
        return ret;
    }
    return 0;
}

int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
{
    struct hax_set_ram_info info;
    int ret;

    info.pa_start = start_pa;
    info.size = size;
    info.va = host_va;
    info.flags = (uint8_t) flags;

    ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}

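/* Query the capability bits exported by the kernel module. */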
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX capability\n");
        return -errno;
    }

    return 0;
}

int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX version\n");
        return -errno;
    }

    return 0;
}

static char *hax_vm_devfs_string(int vm_id)
{
    return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);
}

static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
}

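/* Ask the driver to create a new VM and return its numeric id via *vmid. */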
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;

    if (hax_invalid_fd(hax->fd)) {
        return -EINVAL;
    }

    if (hax->vm) {
        return 0;
    }

    ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
    *vmid = vm_id;
    return ret;
}

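/* Open the devfs node of an existing VM and mark the fd close-on-exec. */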
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    hax_fd fd;
    char *vm_name = NULL;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        return -1;
    }

    fd = open(vm_name, O_RDWR);
    g_free(vm_name);

    qemu_set_cloexec(fd);

    return fd;
}

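/* Report QEMU's supported API range to the driver for this VM. */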
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;

    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }

    ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
    if (ret < 0) {
        fprintf(stderr, "Failed to notify qemu API version\n");
        return ret;
    }
    return 0;
}

/*
 * Simply assume the size should be bigger than the hax_tunnel,
 * since the hax_tunnel can be extended later with compatibility considered.
 */
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;

    ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
    if (ret < 0) {
        fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
    }

    return ret;
}

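/* Open the devfs node of a vCPU that has already been created. */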
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd fd;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path) {
        fprintf(stderr, "Failed to get the devfs\n");
        return -EINVAL;
    }

    fd = open(devfs_path, O_RDWR);
    g_free(devfs_path);
    if (fd < 0) {
        fprintf(stderr, "Failed to open the vcpu devfs\n");
    }
    qemu_set_cloexec(fd);
    return fd;
}

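/*
 * Set up the shared communication tunnel and I/O buffer between QEMU and
 * the in-kernel vCPU, and cache their mapped addresses in the vCPU state.
 */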
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    int ret;
    struct hax_tunnel_info info;

    ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
    if (ret) {
        fprintf(stderr, "Failed to setup the hax tunnel\n");
        return ret;
    }

    if (!valid_hax_tunnel_size(info.size)) {
        fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
        return -EINVAL;
    }

    vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
    vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
    return 0;
}

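/* Enter the guest; the ioctl returns when the vCPU exits back to QEMU. */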
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
}

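/*
 * The hax_sync_* helpers below copy FPU state, MSRs and the general
 * register file either to the driver (set != 0) or from it (set == 0).
 */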
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
    }
    return ret;
}

int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
    }
    return ret;
}

int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
    } else {
        ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
    }
    return ret;
}

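/* Inject an external interrupt with the given vector into the vCPU. */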
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
}

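/* Force the vCPU thread out of guest mode so it notices exit_request. */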
void hax_kick_vcpu_thread(CPUState *cpu)
{
    /*
     * FIXME: race condition with the exit_request check in
     * hax_vcpu_hax_exec
     */
    cpu->exit_request = 1;
    cpus_kick_thread(cpu);
}