/*
 * Copyright 2008 IBM Corporation
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "qemu/guest-random.h"
HVFState *hvf_state;

hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}
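/*
 * Slots are half-open guest-physical intervals [start, start + size).
 * Two intervals [a, a + s1) and [b, b + s2) overlap iff a < b + s2 and
 * b < a + s1, which is exactly the pair of comparisons above; the
 * slot->size test skips slots that are currently unused.
 */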
/* Host-side bookkeeping for slots actually mapped into the HVF VM. */
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
};

struct mac_slot mac_slots[32];
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}
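/*
 * Hypervisor.framework has no resize primitive: hv_vm_unmap() and
 * hv_vm_map() operate on whole guest-physical ranges.  A slot whose size
 * changed is therefore unmapped first and mapped afresh, and callers
 * shrink a slot to size 0 purely to force the unmap path.
 */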
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writeable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writeable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }
    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. Set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }
    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
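/*
 * Slot update protocol: a live slot is never resized in place.  Any
 * overlapping slot is first shrunk to size 0 (unmapping it from the VM),
 * and only then is a free slot populated and mapped with the new bounds
 * and protection flags.
 */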
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

static void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}
static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    /* QEMU state is the reference, push it to HVF now and on next entry */
    cpu->vcpu_dirty = true;
}
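/*
 * vcpu_dirty means "the QEMU-side CPUState, not HVF, holds the current
 * register values".  It is set here and after hvf_get_registers(); the
 * arch backend is expected to push the registers into HVF and clear the
 * flag before the next vmentry.
 */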
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    /* stop tracking region */
    } else {
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}
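/*
 * Dirty-page tracking works by write-protection: with HV_MEMORY_WRITE
 * removed from a logged slot, guest stores fault out of hv_vcpu_run(),
 * and the arch-specific exit handler can consult HVF_SLOT_LOG to mark
 * the touched page dirty and restore write access, rather than treating
 * the fault as a genuine access violation.
 */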
static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}
static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}
static MemoryListener hvf_memory_listener = {
    .priority = 10,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};
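/*
 * The memory core invokes these callbacks whenever the flat view of
 * address_space_memory changes (RAM hotplug, ROM device mode flips, PCI
 * BAR moves, ...), so hvf_state->slots tracks the guest memory map
 * without any polling.  The listener is registered in hvf_accel_init()
 * below.
 */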
static void dummy_signal(int sig)
{
}
static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}
static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};
static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);
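/*
 * type_init() queues a constructor that registers the "hvf" accelerator
 * with QOM at startup; the accelerator is then selected on the command
 * line, e.g. (illustrative invocation):
 *
 *     qemu-system-x86_64 -accel hvf -m 4G disk.img
 *
 * AccelClass::init_machine (hvf_accel_init() above) runs once when the
 * accelerator is chosen, before any vCPU thread is created.
 */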
static void hvf_vcpu_destroy(CPUState *cpu)
{
    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
    assert_hvf_ok(ret);

    hvf_arch_vcpu_destroy(cpu);
    g_free(cpu->hvf);
    cpu->hvf = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
    int r;

    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));

    /* init cpu signals */
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    return hvf_arch_init_vcpu(cpu);
}
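/*
 * The SIG_IPI setup above is how QEMU "kicks" a vCPU thread: the handler
 * itself does nothing (dummy_signal), but delivery of the signal forces a
 * blocking hv_vcpu_run() to return so the thread re-evaluates its state.
 * SIG_IPI is removed from the computed mask so it stays deliverable to
 * this thread; the arch code may additionally use Hypervisor.framework's
 * hv_vcpu_interrupt() for the same purpose.
 */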
/*
 * The HVF-specific vCPU thread function. This one should only run when the
 * host CPU supports the VMX "unrestricted guest" feature.
 */
static void *hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
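/*
 * Lifecycle sketch: the loop alternates between running guest code in
 * hvf_vcpu_exec(), which returns on every VM exit that QEMU must service,
 * and sleeping in qemu_wait_io_event() while the vCPU is halted or
 * stopped.  The iothread mutex is held whenever this thread touches
 * shared machine state; the exec path is expected to drop it around the
 * actual hv_vcpu_run() call.
 */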
static void hvf_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /*
     * HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode.
     */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = hvf_start_vcpu_thread;

    ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
    ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
    ops->synchronize_state = hvf_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
}
static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}

type_init(hvf_accel_ops_register_types);