/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/* HAX module interface - darwin version */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "target/i386/hax-i386.h"
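/* Open the HAXM control device.  The fd is marked close-on-exec; callers
 * check the result for validity (e.g. via hax_invalid_fd()). */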
hax_fd hax_mod_open(void)
{
    int fd = open("/dev/HAX", O_RDWR);

    if (fd == -1) {
        fprintf(stderr, "Failed to open the hax module\n");
    }

    fcntl(fd, F_SETFD, FD_CLOEXEC);

    return fd;
}
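/* Register a RAM block with the kernel module.  Newer HAXM releases take a
 * 64-bit ramblock descriptor (HAX_VM_IOCTL_ADD_RAMBLOCK); older ones fall
 * back to the legacy 32-bit HAX_VM_IOCTL_ALLOC_RAM interface. */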
int hax_populate_ram(uint64_t va, uint64_t size)
{
    int ret;

    if (!hax_global.vm || !hax_global.vm->fd) {
        fprintf(stderr, "Allocate memory before vm create?\n");
        return -EINVAL;
    }

    if (hax_global.supports_64bit_ramblock) {
        struct hax_ramblock_info ramblock = {
            .start_va = va,
            .size = size,
            .reserved = 0
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
    } else {
        struct hax_alloc_ram_info info = {
            .size = (uint32_t)size,
            .pad = 0,
            .va = va
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
    }
    if (ret < 0) {
        fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
                ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
                hax_global.supports_64bit_ramblock ? "new" : "legacy");
        return ret;
    }
    return 0;
}
int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
{
    struct hax_set_ram_info info;
    int ret;

    info.pa_start = start_pa;
    info.size = size;
    info.va = host_va;
    info.flags = (uint8_t) flags;

    ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}
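/* Query the module's capability bits into *cap. */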
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX capability\n");
        return -errno;
    }

    return 0;
}
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX version\n");
        return -errno;
    }

    return 0;
}
static char *hax_vm_devfs_string(int vm_id)
{
    return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);
}

static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
}
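/* Ask the module to create a VM and report back the id it was assigned. */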
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;

    if (hax_invalid_fd(hax->fd)) {
        return -EINVAL;
    }

    if (hax->vm) {
        return 0;
    }

    ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
    *vmid = vm_id;
    return ret;
}
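/* Open the per-VM device node created by the module for vm_id. */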
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    hax_fd fd;
    char *vm_name = NULL;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        return -1;
    }

    fd = open(vm_name, O_RDWR);
    g_free(vm_name);

    fcntl(fd, F_SETFD, FD_CLOEXEC);

    return fd;
}
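/* Report the HAX API version this QEMU build supports to the VM device. */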
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;

    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }

    ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
    if (ret < 0) {
        fprintf(stderr, "Failed to notify qemu API version\n");
        return ret;
    }
    return 0;
}
/* Simply assume the size should be bigger than the hax_tunnel, since the
 * hax_tunnel can be extended later while preserving compatibility.
 */
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;

    ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
    if (ret < 0) {
        fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
    }

    return ret;
}
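/* Open the devfs node for a specific vCPU of a VM. */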
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd fd;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path) {
        fprintf(stderr, "Failed to get the devfs\n");
        return -EINVAL;
    }

    fd = open(devfs_path, O_RDWR);
    g_free(devfs_path);
    if (fd < 0) {
        fprintf(stderr, "Failed to open the vcpu devfs\n");
    }
    fcntl(fd, F_SETFD, FD_CLOEXEC);
    return fd;
}
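/* Establish the shared "tunnel" between QEMU and the kernel module: the
 * ioctl reports the addresses of the communication page and the I/O buffer,
 * which are then recorded in the vcpu state. */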
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    int ret;
    struct hax_tunnel_info info;

    ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
    if (ret) {
        fprintf(stderr, "Failed to setup the hax tunnel\n");
        return ret;
    }

    if (!valid_hax_tunnel_size(info.size)) {
        fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
        return -EINVAL;
    }

    vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
    vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
    return 0;
}
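/* Enter guest execution; the run ioctl returns to QEMU on each VM exit. */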
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
}
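/* The three sync helpers below share one pattern: look up the vCPU fd from
 * the CPU state, then either push (set != 0) or pull (set == 0) the FPU
 * area, the MSR list, or the register state with the matching ioctl. */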
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
    }
    return ret;
}
int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
    }
    return ret;
}
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
    } else {
        ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
    }
    return ret;
}
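/* Deliver an external interrupt vector to the vCPU. */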
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
}