/*
 * Imported from qemu.git: target/i386/hax-posix.c
 * (blob 3bad89f133376bc054c0a0523591f13294e58678, via merge tag
 * pull-block-2020-03-26)
 */
/*
 * QEMU HAXM support
 *
 * Copyright (c) 2011 Intel Corporation
 * Written by:
 * Jiang Yunhong<yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/* HAX module interface - darwin version */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "target/i386/hax-i386.h"
19 hax_fd hax_mod_open(void)
21 int fd = open("/dev/HAX", O_RDWR);
22 if (fd == -1) {
23 fprintf(stderr, "Failed to open the hax module\n");
26 fcntl(fd, F_SETFD, FD_CLOEXEC);
28 return fd;
31 int hax_populate_ram(uint64_t va, uint64_t size)
33 int ret;
35 if (!hax_global.vm || !hax_global.vm->fd) {
36 fprintf(stderr, "Allocate memory before vm create?\n");
37 return -EINVAL;
40 if (hax_global.supports_64bit_ramblock) {
41 struct hax_ramblock_info ramblock = {
42 .start_va = va,
43 .size = size,
44 .reserved = 0
47 ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
48 } else {
49 struct hax_alloc_ram_info info = {
50 .size = (uint32_t)size,
51 .pad = 0,
52 .va = va
55 ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
57 if (ret < 0) {
58 fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
59 ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
60 hax_global.supports_64bit_ramblock ? "new" : "legacy");
61 return ret;
63 return 0;
66 int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
68 struct hax_set_ram_info info;
69 int ret;
71 info.pa_start = start_pa;
72 info.size = size;
73 info.va = host_va;
74 info.flags = (uint8_t) flags;
76 ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
77 if (ret < 0) {
78 return -errno;
80 return 0;
83 int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
85 int ret;
87 ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
88 if (ret == -1) {
89 fprintf(stderr, "Failed to get HAX capability\n");
90 return -errno;
93 return 0;
96 int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
98 int ret;
100 ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
101 if (ret == -1) {
102 fprintf(stderr, "Failed to get HAX version\n");
103 return -errno;
106 return 0;
/* Build the devfs path of a VM node, e.g. "/dev/hax_vm/vm01". Caller frees. */
static char *hax_vm_devfs_string(int vm_id)
{
    char *path = g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);

    return path;
}
/* Build the devfs path of a vCPU node for the given VM. Caller frees. */
static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    char *path = g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);

    return path;
}
119 int hax_host_create_vm(struct hax_state *hax, int *vmid)
121 int ret;
122 int vm_id = 0;
124 if (hax_invalid_fd(hax->fd)) {
125 return -EINVAL;
128 if (hax->vm) {
129 return 0;
132 ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
133 *vmid = vm_id;
134 return ret;
137 hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
139 hax_fd fd;
140 char *vm_name = NULL;
142 vm_name = hax_vm_devfs_string(vm_id);
143 if (!vm_name) {
144 return -1;
147 fd = open(vm_name, O_RDWR);
148 g_free(vm_name);
150 fcntl(fd, F_SETFD, FD_CLOEXEC);
152 return fd;
155 int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
157 int ret;
159 if (hax_invalid_fd(vm_fd)) {
160 return -EINVAL;
163 ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
165 if (ret < 0) {
166 fprintf(stderr, "Failed to notify qemu API version\n");
167 return ret;
169 return 0;
172 /* Simply assume the size should be bigger than the hax_tunnel,
173 * since the hax_tunnel can be extended later with compatibility considered
175 int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
177 int ret;
179 ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
180 if (ret < 0) {
181 fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
184 return ret;
187 hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
189 char *devfs_path = NULL;
190 hax_fd fd;
192 devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
193 if (!devfs_path) {
194 fprintf(stderr, "Failed to get the devfs\n");
195 return -EINVAL;
198 fd = open(devfs_path, O_RDWR);
199 g_free(devfs_path);
200 if (fd < 0) {
201 fprintf(stderr, "Failed to open the vcpu devfs\n");
203 fcntl(fd, F_SETFD, FD_CLOEXEC);
204 return fd;
207 int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
209 int ret;
210 struct hax_tunnel_info info;
212 ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
213 if (ret) {
214 fprintf(stderr, "Failed to setup the hax tunnel\n");
215 return ret;
218 if (!valid_hax_tunnel_size(info.size)) {
219 fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
220 ret = -EINVAL;
221 return ret;
224 vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
225 vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
226 return 0;
229 int hax_vcpu_run(struct hax_vcpu_state *vcpu)
231 return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
234 int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
236 int ret, fd;
238 fd = hax_vcpu_get_fd(env);
239 if (fd <= 0) {
240 return -1;
243 if (set) {
244 ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
245 } else {
246 ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
248 return ret;
251 int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
253 int ret, fd;
255 fd = hax_vcpu_get_fd(env);
256 if (fd <= 0) {
257 return -1;
259 if (set) {
260 ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
261 } else {
262 ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
264 return ret;
267 int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
269 int ret, fd;
271 fd = hax_vcpu_get_fd(env);
272 if (fd <= 0) {
273 return -1;
276 if (set) {
277 ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
278 } else {
279 ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
281 return ret;
284 int hax_inject_interrupt(CPUArchState *env, int vector)
286 int fd;
288 fd = hax_vcpu_get_fd(env);
289 if (fd <= 0) {
290 return -1;
293 return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);