/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"
#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
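
/*
 * Usage sketch (illustrative, not part of the upstream file): every
 * reader or writer of KVMMemoryListener::slots is expected to bracket
 * the access with this lock, e.g.:
 *
 *     kvm_slots_lock();
 *     mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 *     ...
 *     kvm_slots_unlock();
 */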

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and an unaligned start address. Pad the start
       address up to the next page boundary and truncate the size down
       to the previous one. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}
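
/*
 * Example (illustrative): for a slot with start_addr 0x100000 whose
 * host mapping begins at pointer p, the host pointer p + 0x2000
 * translates back to guest physical address 0x102000.
 */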

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    trace_kvm_destroy_vcpu();

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);

err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}
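
/*
 * Note (illustrative summary of the mappings above): the kernel lays
 * out per-vcpu shared state as the kvm_run structure at offset 0 of
 * the vcpu fd, the coalesced MMIO ring s->coalesced_mmio pages into
 * that same mapping, and, when dirty rings are enabled, the dirty gfn
 * ring at page offset KVM_DIRTY_LOG_PAGE_OFFSET of the vcpu fd.
 */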

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return 0;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For the dirty bitmap, the kernel allocates an array of a size
     * aligned to bits-per-long. But when the kernel is 64-bit and
     * userspace is 32-bit, userspace can't align to the same
     * bits-per-long, since sizeof(long) differs between kernel and
     * user space. Userspace would then provide a buffer that may be
     * 4 bytes smaller than what the kernel uses, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * either, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of the kvm dirty log is qemu_real_host_page_size,
     * and mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
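
/*
 * Worked example (illustrative): a 1 GiB slot with 4 KiB host pages
 * covers 262144 pages; ALIGN(262144, 64) / 8 = 32768, i.e. a 32 KiB
 * bitmap with one bit per host page.
 */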

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    } else if (ret < 0) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * vcpu0                 vcpu1                         host kernel
     * ------------------------------------------------------------------------------
     * fill gfn0
     * store-rel flags for gfn0
     *                       load-acq flags for gfn0
     *                       store-rel RESET for gfn0
     *                                                     ioctl(RESET_RINGS)
     *                                                       load-acq flags for gfn0
     *                                                       check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures. If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider dropping the BQL if we're clear about all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we have a potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     the reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch the dirty bitmap from the kernel,
 * and then update qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
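
/*
 * With 4 KiB host pages this makes KVM_CLEAR_LOG_ALIGN 256 KiB
 * (64 pages), so clear requests are rounded out to 64-page blocks.
 */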

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  start          bmap_start     (start)                         end
     *  of memslot                                                of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */
    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * the page boundaries.
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
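
/*
 * Worked example (illustrative): with 4 KiB pages, clearing
 * start=0x42000, size=0x10000 inside a large slot gives
 * bmap_start = 0x40000 -> page 64, start_delta = 0x2000 -> 2 pages,
 * and bmap_npages = 64.  Since start_delta is non-zero, the slow path
 * builds a temporary bitmap covering pages 64..127 with its first two
 * bits cleared before issuing KVM_CLEAR_DIRTY_LOG.
 */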

/*
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }

    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}
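
/*
 * Note (assumption, for orientation): this hook exists for targets
 * whose KVM memslots cannot cover arbitrarily large ranges; s390x, for
 * instance, caps the slot size from its target code, and larger RAM
 * regions are then split into several slots by the
 * MIN(kvm_max_slot_size, size) loops elsewhere in this file.
 */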

static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}

/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the dirty limit logic is not disturbed
         * by the reaper */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}

static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages. Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
        if (ret <= 0) {
            warn_report("KVM dirty ring not available, using bitmap method");
            return 0;
        }
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld).  Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }
        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}
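
/*
 * Worked example (illustrative): struct kvm_dirty_gfn is 16 bytes
 * (u32 flags, u32 slot, u64 offset), so a 4096-entry ring costs
 * 4096 * 16 = 64 KiB of shared memory per vcpu.
 */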

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}

static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by addresses, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }
    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

void kvm_add_routing_entry(KVMState *s,
                           struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;

    msi.address_lo = (uint32_t)msg.address;
    msi.address_hi = msg.address >> 32;
    msi.data = le32_to_cpu(msg.data);
    msi.flags = 0;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    KVMState *s = c->s;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    if (s->irq_routes->nr < s->gsi_count) {
        trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                        vector, virq);

        kvm_add_routing_entry(s, &kroute);
        kvm_arch_add_msi_route_post(&kroute, vector, dev);
        c->changes++;
    } else {
        kvm_irqchip_release_virq(s, virq);
        return -ENOSPC;
    }

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in the
             * userspace, KVM kernel resamplefd will not work because
             * the EOI of the interrupt will be delivered to userspace
             * instead, so the KVM kernel resamplefd kick will be
             * skipped.  The userspace here mimics what the kernel
             * provides with resamplefd, remember the resamplefd and
             * kick it when we receive EOI of this IRQ.
             *
             * This is hackery because IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However
             * this can bring much performance back for split irqchip
             * with INTx IRQs (for VFIO, this gives 93% perf of the
             * full fast path, which is 46% perf boost comparing to
             * the INTx slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}

#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}

static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
        fprintf(stderr, "kvm: irqfd not implemented\n");
        exit(1);
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            error_report("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

bool kvm_dirty_ring_enabled(void)
{
    return kvm_state->kvm_dirty_ring_size ? true : false;
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp);
static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);

uint32_t kvm_dirty_ring_size(void)
{
    return kvm_state->kvm_dirty_ring_size;
}
static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    const struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP",          ms->smp.cpus },
        { "hotpluggable", ms->smp.max_cpus },
        { /* end of list */ }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type;
    uint64_t dirty_log_manual_caps;

    qemu_mutex_init(&kml_slots_lock);

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());

    s->sigmask_len = 8;
    accel_blocker_init();

#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
    kvm_guest_memfd_supported =
        kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
        kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
        (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);

    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
    if (s->nr_as <= 1) {
        s->nr_as = 1;
    }
    s->as = g_new0(struct KVMAs, s->nr_as);

    if (object_property_find(OBJECT(current_machine), "kvm-type")) {
        g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
                                                            "kvm-type",
                                                            &error_abort);
        type = mc->kvm_type(ms, kvm_type);
    } else if (mc->kvm_type) {
        type = mc->kvm_type(ms, NULL);
    } else {
        type = kvm_arch_get_default_type(ms);
    }

    if (type < 0) {
        ret = -EINVAL;
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#elif defined(TARGET_PPC)
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
                    (type == 2) ? "pr" : "hv");
        }
#endif
        goto err;
    }

    s->vmfd = ret;

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            warn_report("Number of %s cpus requested (%d) exceeds "
                        "the recommended cpus supported by KVM (%d)",
                        nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);

    /*
     * Enable KVM dirty ring if supported, otherwise fall back to
     * dirty logging mode
     */
    ret = kvm_dirty_ring_init(s);
    if (ret < 0) {
        goto err;
    }

    /*
     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
     * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
     * page is wr-protected initially, which is against how kvm dirty ring is
     * usage - kvm dirty ring requires all pages are wr-protected at the very
     * beginning.  Enabling this feature for dirty ring causes data corruption.
     *
     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
     * we may expect a higher stall time when starting the migration.  In the
     * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
     * instead of clearing dirty bit, it can be a way to explicitly wr-protect
     * guest pages.
     */
    if (!s->kvm_dirty_ring_size) {
        dirty_log_manual_caps =
            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
        s->manual_dirty_log_protect = dirty_log_manual_caps;
        if (dirty_log_manual_caps) {
            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                    dirty_log_manual_caps);
            if (ret) {
                warn_report("Trying to enable capability %"PRIu64" of "
                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
                            "Falling back to the legacy mode. ",
                            dirty_log_manual_caps);
                s->manual_dirty_log_protect = 0;
            }
        }
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
    kvm_has_guest_debug =
        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
#endif

    kvm_sstep_flags = 0;
    if (kvm_has_guest_debug) {
        kvm_sstep_flags = SSTEP_ENABLE;

#if defined TARGET_KVM_HAVE_GUEST_DEBUG
        int guest_debug_flags =
            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);

        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
            kvm_sstep_flags |= SSTEP_NOIRQ;
        }
#endif
    }

    kvm_state = s;

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
                                  ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0, "kvm-memory");
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    if (s->kvm_dirty_ring_size) {
        kvm_dirty_ring_reaper_init(s);
    }

    if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
        add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
                            query_stats_schemas_cb);
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->as);
    g_free(s->memory_listener.slots);

    return ret;
}
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}
static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}
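
/*
 * Note: KVM_EXIT_IO describes a batch of identical accesses: the kernel
 * places 'count' items of 'size' bytes at 'data_offset' inside the shared
 * kvm_run page, and the loop above replays each one through the I/O
 * address space.
 */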
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    int i;

    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    for (i = 0; i < run->internal.ndata; ++i) {
        fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
                i, (uint64_t)run->internal.data[i]);
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (!s || s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
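
/*
 * The coalesced MMIO ring lives in the same shared mapping as kvm_run:
 * the kernel appends entries at 'last' while userspace consumes from
 * 'first', so the flush above simply drains the [first, last) window and
 * replays each entry as an ordinary PIO or MMIO write.
 */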
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
        int ret = kvm_arch_get_registers(cpu);
        if (ret) {
            error_report("Failed to get registers: %s", strerror(-ret));
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            vm_stop(RUN_STATE_INTERNAL_ERROR);
        }

        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    if (ret) {
        error_report("Failed to put registers after reset: %s", strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    if (ret) {
        error_report("Failed to put registers after init: %s", strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    if (!kvm_state->guest_state_protected) {
        /*
         * This runs before the machine_init_done notifiers, and is the last
         * opportunity to synchronize the state of confidential guests.
         */
        run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
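
/*
 * The vcpu_dirty flag implements a lazy register-synchronization protocol:
 * kvm_cpu_synchronize_state() pulls the in-kernel register file into
 * CPUState and marks it dirty, after which the QEMU copy is authoritative;
 * kvm_cpu_exec() pushes it back (KVM_PUT_RUNTIME_STATE) before the next
 * KVM_RUN.  The post-reset/post-init/pre-loadvm hooks pick the appropriate
 * KVM_PUT_* level for their callers.
 */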
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}
static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}
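
/*
 * Two kick mechanisms coexist: with KVM_CAP_IMMEDIATE_EXIT the kicker sets
 * run->immediate_exit and KVM_RUN returns -EINTR without any signal; on
 * older kernels a SIG_IPI is delivered instead, and kvm_eat_signals()
 * drains any that are still pending so a stale signal cannot interrupt the
 * next KVM_RUN.
 */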
int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
{
    MemoryRegionSection section;
    ram_addr_t offset;
    MemoryRegion *mr;
    RAMBlock *rb;
    void *addr;
    int ret = -EINVAL;

    trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");

    if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
        !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
        return -EINVAL;
    }

    if (!size) {
        return -EINVAL;
    }

    section = memory_region_find(get_system_memory(), start, size);
    mr = section.mr;
    if (!mr) {
        /*
         * Ignore converting non-assigned region to shared.
         *
         * TDX requires vMMIO region to be shared to inject #VE to guest.
         * OVMF issues conservatively MapGPA(shared) on 32bit PCI MMIO region,
         * and vIO-APIC 0xFEC00000 4K page.
         * OVMF assigns 32bit PCI MMIO region to
         * [top of low memory: typically 2GB=0xC000000,  0xFC00000)
         */
        if (!to_private) {
            return 0;
        }
        return -EINVAL;
    }

    if (!memory_region_has_guest_memfd(mr)) {
        /*
         * Because vMMIO region must be shared, guest TD may convert vMMIO
         * region to shared explicitly.  Don't complain such case.  See
         * memory_region_type() for checking if the region is MMIO region.
         */
        if (!to_private &&
            !memory_region_is_ram(mr) &&
            !memory_region_is_ram_device(mr) &&
            !memory_region_is_rom(mr) &&
            !memory_region_is_romd(mr)) {
            ret = 0;
        } else {
            error_report("Convert non guest_memfd backed memory region "
                         "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
                         start, size, to_private ? "private" : "shared");
        }
        goto out_unref;
    }

    if (to_private) {
        ret = kvm_set_memory_attributes_private(start, size);
    } else {
        ret = kvm_set_memory_attributes_shared(start, size);
    }
    if (ret) {
        goto out_unref;
    }

    addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
    rb = qemu_ram_block_from_host(addr, false, &offset);

    if (to_private) {
        if (rb->page_size != qemu_real_host_page_size()) {
            /*
             * shared memory is backed by hugetlb, which is supposed to be
             * pre-allocated and doesn't need to be discarded
             */
            goto out_unref;
        }
        ret = ram_block_discard_range(rb, offset, size);
    } else {
        ret = ram_block_discard_guest_memfd_range(rb, offset, size);
    }

out_unref:
    memory_region_unref(mr);
    return ret;
}
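
/*
 * For confidential guests backed by guest_memfd, a page is either private
 * (guest_memfd) or shared (ordinary RAM).  Conversion updates the KVM
 * memory attributes and then discards the now-unused copy, so both
 * backings are never resident at the same time.
 */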
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    trace_kvm_cpu_exec();

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    bql_unlock();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            if (ret) {
                error_report("Failed to put registers after init: %s",
                             strerror(-ret));
                ret = -1;
                break;
            }

            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            trace_kvm_interrupt_exit_request();
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            bql_lock();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            bql_unlock();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                trace_kvm_io_window_exit();
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
                fprintf(stderr, "error: kvm run failed %s\n",
                        strerror(-run_ret));
#ifdef TARGET_PPC
                if (run_ret == -EBUSY) {
                    fprintf(stderr,
                            "This is probably because your SMT is enabled.\n"
                            "VCPU can only run on primary threads with all "
                            "secondary threads offline.\n");
                }
#endif
                ret = -1;
                break;
            }
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_DIRTY_RING_FULL:
            /*
             * We shouldn't continue if the dirty ring of this vcpu is
             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
            bql_lock();
            /*
             * We throttle vCPU by making it sleep once it exit from kernel
             * due to dirty ring full. In the dirtylimit scenario, reaping
             * all vCPUs after a single vCPU dirty ring get full result in
             * the miss of sleep, so just reap the ring-fulled vCPU.
             */
            if (dirtylimit_in_service()) {
                kvm_dirty_ring_reap(kvm_state, cpu);
            } else {
                kvm_dirty_ring_reap(kvm_state, NULL);
            }
            bql_unlock();
            dirtylimit_vcpu_execute(cpu);
            ret = 0;
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                bql_lock();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                bql_unlock();
                ret = 0;
                break;
            default:
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        case KVM_EXIT_MEMORY_FAULT:
            trace_kvm_memory_fault(run->memory_fault.gpa,
                                   run->memory_fault.size,
                                   run->memory_fault.flags);
            if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
                error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
                             (uint64_t)run->memory_fault.flags);
                ret = -1;
                break;
            }
            ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
                                     run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
            break;
        default:
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    bql_lock();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}
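
/*
 * A minimal sketch of how a vcpu thread might drive kvm_cpu_exec() (for
 * illustration only; the real loop lives in kvm_vcpu_thread_fn() in
 * accel/kvm/kvm-accel-ops.c and also services run_on_cpu work items):
 *
 *     do {
 *         if (cpu_can_run(cpu)) {
 *             r = kvm_cpu_exec(cpu);
 *             if (r == EXCP_DEBUG) {
 *                 cpu_handle_guest_debug(cpu);
 *             }
 *         }
 *         qemu_wait_io_event(cpu);
 *     } while (!cpu->unplug || cpu_can_run(cpu));
 */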
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    accel_ioctl_begin();
    ret = ioctl(s->vmfd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    accel_cpu_ioctl_begin(cpu);
    ret = ioctl(cpu->kvm_fd, type, arg);
    accel_cpu_ioctl_end(cpu);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    accel_ioctl_begin();
    ret = ioctl(fd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
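
/*
 * The four wrappers above mirror KVM's file-descriptor hierarchy: the
 * /dev/kvm system fd (kvm_ioctl), the VM fd (kvm_vm_ioctl), a per-vcpu fd
 * (kvm_vcpu_ioctl) and an in-kernel device fd (kvm_device_ioctl).  All but
 * the system-fd wrapper bracket the call with accel_ioctl_begin()/end() so
 * that in-flight ioctls can be accounted for and inhibited by the accel
 * blocker (see accel-blocker.h).
 */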
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
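
/*
 * Typical use (a sketch only; 'dev_fd' is a hypothetical device fd and the
 * attribute group is borrowed from the ARM vGIC as an example):
 *
 *     uint32_t val;
 *     if (kvm_device_check_attr(dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
 *         kvm_device_access(dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
 *                           &val, false, &error_abort);   // read
 *     }
 */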
bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
        }
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}

bool kvm_supports_guest_debug(void)
{
    /* probed during kvm_init() */
    return kvm_has_guest_debug;
}

int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_new(struct kvm_sw_breakpoint, 1);
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#endif /* !TARGET_KVM_HAVE_GUEST_DEBUG */
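
/*
 * Software breakpoints are implemented by patching the guest's code with an
 * architecture-specific trap instruction (kvm_arch_insert_sw_breakpoint)
 * and are reference-counted per address; hardware breakpoints map onto the
 * CPU's debug registers via the kvm_arch_*_hw_breakpoint hooks.  Either
 * way, every vcpu is then re-armed through KVM_SET_GUEST_DEBUG.
 */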
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
/* Called asynchronously in VCPU thread.  */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread.  */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
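
/*
 * KVM_CREATE_DEVICE with KVM_CREATE_DEVICE_TEST only checks whether the
 * device type is supported and creates nothing, which is what both the
 * 'test' path above and kvm_device_supported() rely on.
 */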
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}
static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}
static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above. If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}
bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}
static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}
static char *kvm_get_device(Object *obj,
                            Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    return g_strdup(s->device);
}

static void kvm_set_device(Object *obj,
                           const char *value,
                           Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    g_free(s->device);
    s->device = g_strdup(value);
}
static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_with_bitmap = false;
    s->kvm_eager_split_size = 0;
    s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
    s->notify_window = 0;
    s->xen_version = 0;
    s->xen_gnttab_max_frames = 64;
    s->xen_evtchn_max_pirq = 256;
    s->device = NULL;
}
/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug. The
 * support is probed during kvm_init()
 */
static int kvm_gdbstub_sstep_flags(void)
{
    return kvm_sstep_flags;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
        NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
    object_class_property_set_description(oc, "device",
        "Path to the device node to use (default: /dev/kvm)");

    kvm_arch_accel_class_init(oc);
}
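
/*
 * These class properties surface on the command line, e.g.:
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split,dirty-ring-size=4096
 *
 * (an illustrative invocation; the property names come from the
 * object_class_property_add() calls above).
 */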
static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);
typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;
static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;
    int i;

    /* Only add stats that we understand.  */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}
static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}
/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident; /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);
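
/*
 * Layout of a KVM binary stats fd, as consumed below: a kvm_stats_header
 * at offset 0, an array of kvm_stats_desc entries (each followed by a
 * name_size-byte name) at desc_offset, and the raw u64 data at
 * data_offset.  The descriptors never change once the fd is open, so they
 * are cached per target here.
 */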
/*
 * Return the descriptors for 'target', that either have already been read
 * or are retrieved from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zu",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zu",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}
static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size; read schema data */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zu", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}
static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size; read schema data */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
                       kvm_stats_args->errp);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}
void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}
void kvm_mark_guest_state_protected(void)
{
    kvm_state->guest_state_protected = true;
}
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
{
    int fd;
    struct kvm_create_guest_memfd guest_memfd = {
        .size = size,
        .flags = flags,
    };

    if (!kvm_guest_memfd_supported) {
        error_setg(errp, "KVM does not support guest_memfd");
        return -1;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
    if (fd < 0) {
        error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
        return -1;
    }

    return fd;
}