/*
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}
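/*
 * Illustrative sketch, not part of the original file: with a split irqchip,
 * the three helpers above cooperate roughly as follows, assuming a caller
 * that already owns a level-triggered virq and an initialized EventNotifier
 * (the names virq/resample below are placeholders for this example only):
 *
 *     kvm_resample_fd_insert(virq, &resample);   // on irqfd assign
 *     ...                                        // userspace IOAPIC sees EOI
 *     kvm_resample_fd_notify(virq);              // kick the resample event
 *     kvm_resample_fd_remove(virq);              // on irqfd deassign
 */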
unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}
/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}
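/*
 * Worked example for kvm_align_section(), assuming a 4 KiB host page size:
 * a section with offset_within_address_space = 0x1234 and size = 0x3000
 * yields aligned = ROUND_UP(0x1234, 0x1000) = 0x2000 and delta = 0xdcc, so
 * the returned size is (0x3000 - 0xdcc) & ~0xfff = 0x2000 with
 * *start = 0x2000. Sub-page head and tail are dropped, so the aligned
 * section may come out empty (size 0).
 */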
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                              mem.memory_size, mem.userspace_addr, ret);
    if (ret < 0) {
        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                     __func__, mem.slot, slot->start_addr,
                     (uint64_t)mem.memory_size, strerror(errno));
    }
    return ret;
}
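/*
 * Note on the slot id encoding used above: the low 16 bits of mem.slot hold
 * the per-listener slot number and the high bits hold the address-space id,
 * e.g. slot 3 in address space 1 is encoded as (3 | (1 << 16)) = 0x10003.
 * kvm_dirty_ring_mark_page() below reverses this with "slot >> 16" and
 * "slot & 0xffff".
 */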
static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    trace_kvm_destroy_vcpu();

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}
int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}
static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}
/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  So userspace may provide a buffer that is
     * 4 bytes smaller than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by
     * valgrind either, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
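/*
 * Worked example of the bitmap sizing above, assuming 4 KiB host pages:
 * a 1 GiB slot covers 262144 pages, so bitmap_size =
 * ALIGN(262144, 64) / 8 = 32768 bytes, i.e. one bit per page rounded up
 * to a multiple of 64 bits to stay compatible with a 64-bit kernel.
 */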
/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}
/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}
static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
/*
 * Should be with all slots_lock held for the address spaces.  It returns
 * the number of dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}
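/*
 * Example of the ring indexing above: with ring_size = 4096 and
 * kvm_fetch_index = 4100, the next entry examined is
 * dirty_gfns[4100 % 4096] = dirty_gfns[4]; the fetch index grows
 * monotonically and only the modulo addresses the ring. Each entry's
 * "slot" field packs the address-space id in the high 16 bits and the
 * slot number in the low 16 bits.
 */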
/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider to drop the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protect those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}
static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}
/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
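/*
 * Example, assuming 4 KiB host pages: KVM_CLEAR_LOG_ALIGN is
 * 4096 << 6 = 256 KiB, so a clear request starting at 0x41000 is widened
 * down to bmap_start = 0x41000 & KVM_CLEAR_LOG_MASK = 0x40000 before the
 * KVM_CLEAR_DIRTY_LOG ioctl is built in kvm_log_clear_one_slot() below.
 */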
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}

/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}
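/*
 * Worked example of the overlap math in kvm_physical_log_clear() above:
 * for a section covering [0x100000, 0x500000) and a slot with
 * start_addr = 0x200000 and memory_size = 0x200000, the slot starts after
 * the section, so offset = 0 and
 * count = MIN(0x200000, 0x400000 - 0x100000) = 0x200000, i.e. the whole
 * slot is cleared.
 */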
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}
/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
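/*
 * Example: on a cross-endian setup (say a big-endian ppc guest on a
 * little-endian host), adjust_ioeventfd_endianness(0x1234, 2) returns
 * bswap16(0x1234) = 0x3412, so the datamatch value handed to the kernel
 * is in the byte order KVM expects.
 */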
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }

    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}
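/*
 * Example of the splitting this enables: if an architecture calls
 * kvm_set_max_memslot_size(4 * GiB), an 11 GiB RAM section is registered
 * by kvm_set_phys_mem() below as three consecutive slots of 4 GiB, 4 GiB
 * and 3 GiB, each aligned to the host page size.
 */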
/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }
        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}
static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that dirtylimit is not interfered with by the reaper */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}
static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages. Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld).  Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }
        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}
static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by addresses, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }
    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}
static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}
static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}
static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;
static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}
void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}
static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;

    msi.address_lo = (uint32_t)msg.address;
    msi.address_hi = msg.address >> 32;
    msi.data = le32_to_cpu(msg.data);
    msi.flags = 0;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}
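/*
 * Example of the MSI address split above: a 64-bit MSI address of
 * 0x00000000fee01004 yields address_lo = 0xfee01004 and address_hi = 0,
 * while the 32-bit payload stored in MSIMessage is converted to the byte
 * order KVM expects with le32_to_cpu().
 */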
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    KVMState *s = c->s;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                    vector, virq);

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    c->changes++;

    return virq;
}
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}
static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in the
             * userspace, KVM kernel resamplefd will not work because
             * the EOI of the interrupt will be delivered to userspace
             * instead, so the KVM kernel resamplefd kick will be
             * skipped.  The userspace here mimics what the kernel
             * provides with resamplefd, remember the resamplefd and
             * kick it when we receive EOI of this IRQ.
             *
             * This is hackery because IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However
             * this can bring much performance back for split irqchip
             * with INTx IRQs (for VFIO, this gives 93% perf of the
             * full fast path, which is 46% perf boost comparing to
             * the INTx slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}
#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}

#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
        fprintf(stderr, "kvm: irqfd not implemented\n");
        exit(1);
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            error_report("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}
/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

bool kvm_dirty_ring_enabled(void)
{
    return kvm_state->kvm_dirty_ring_size ? true : false;
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp);
static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);

uint32_t kvm_dirty_ring_size(void)
{
    return kvm_state->kvm_dirty_ring_size;
}
2322 static int kvm_init(MachineState
*ms
)
2324 MachineClass
*mc
= MACHINE_GET_CLASS(ms
);
2325 static const char upgrade_note
[] =
2326 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2327 "(see http://sourceforge.net/projects/kvm).\n";
2332 { "SMP", ms
->smp
.cpus
},
2333 { "hotpluggable", ms
->smp
.max_cpus
},
2334 { /* end of list */ }
2336 int soft_vcpus_limit
, hard_vcpus_limit
;
2338 const KVMCapabilityInfo
*missing_cap
;
2341 uint64_t dirty_log_manual_caps
;
2343 qemu_mutex_init(&kml_slots_lock
);
2345 s
= KVM_STATE(ms
->accelerator
);
2348 * On systems where the kernel can support different base page
2349 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2350 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2351 * page size for the system though.
2353 assert(TARGET_PAGE_SIZE
<= qemu_real_host_page_size());
2356 accel_blocker_init();
2358 #ifdef KVM_CAP_SET_GUEST_DEBUG
2359 QTAILQ_INIT(&s
->kvm_sw_breakpoints
);
2361 QLIST_INIT(&s
->kvm_parked_vcpus
);
2362 s
->fd
= qemu_open_old(s
->device
?: "/dev/kvm", O_RDWR
);
2364 fprintf(stderr
, "Could not access KVM kernel module: %m\n");
2369 ret
= kvm_ioctl(s
, KVM_GET_API_VERSION
, 0);
2370 if (ret
< KVM_API_VERSION
) {
2374 fprintf(stderr
, "kvm version too old\n");
2378 if (ret
> KVM_API_VERSION
) {
2380 fprintf(stderr
, "kvm version not supported\n");
2384 kvm_immediate_exit
= kvm_check_extension(s
, KVM_CAP_IMMEDIATE_EXIT
);
2385 s
->nr_slots
= kvm_check_extension(s
, KVM_CAP_NR_MEMSLOTS
);
2387 /* If unspecified, use the default value */
2392 s
->nr_as
= kvm_check_extension(s
, KVM_CAP_MULTI_ADDRESS_SPACE
);
2393 if (s
->nr_as
<= 1) {
2396 s
->as
= g_new0(struct KVMAs
, s
->nr_as
);
2398 if (object_property_find(OBJECT(current_machine
), "kvm-type")) {
2399 g_autofree
char *kvm_type
= object_property_get_str(OBJECT(current_machine
),
2402 type
= mc
->kvm_type(ms
, kvm_type
);
2403 } else if (mc
->kvm_type
) {
2404 type
= mc
->kvm_type(ms
, NULL
);
2406 type
= kvm_arch_get_default_type(ms
);
2415 ret
= kvm_ioctl(s
, KVM_CREATE_VM
, type
);
2416 } while (ret
== -EINTR
);
2419 fprintf(stderr
, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret
,
2423 if (ret
== -EINVAL
) {
2425 "Host kernel setup problem detected. Please verify:\n");
2426 fprintf(stderr
, "- for kernels supporting the switch_amode or"
2427 " user_mode parameters, whether\n");
2429 " user space is running in primary address space\n");
2431 "- for kernels supporting the vm.allocate_pgste sysctl, "
2432 "whether it is enabled\n");
2434 #elif defined(TARGET_PPC)
2435 if (ret
== -EINVAL
) {
2437 "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2438 (type
== 2) ? "pr" : "hv");
2446 /* check the vcpu limits */
2447 soft_vcpus_limit
= kvm_recommended_vcpus(s
);
2448 hard_vcpus_limit
= kvm_max_vcpus(s
);
2451 if (nc
->num
> soft_vcpus_limit
) {
2452 warn_report("Number of %s cpus requested (%d) exceeds "
2453 "the recommended cpus supported by KVM (%d)",
2454 nc
->name
, nc
->num
, soft_vcpus_limit
);
2456 if (nc
->num
> hard_vcpus_limit
) {
2457 fprintf(stderr
, "Number of %s cpus requested (%d) exceeds "
2458 "the maximum cpus supported by KVM (%d)\n",
2459 nc
->name
, nc
->num
, hard_vcpus_limit
);
2466 missing_cap
= kvm_check_extension_list(s
, kvm_required_capabilites
);
2469 kvm_check_extension_list(s
, kvm_arch_required_capabilities
);
2473 fprintf(stderr
, "kvm does not support %s\n%s",
2474 missing_cap
->name
, upgrade_note
);
2478 s
->coalesced_mmio
= kvm_check_extension(s
, KVM_CAP_COALESCED_MMIO
);
2479 s
->coalesced_pio
= s
->coalesced_mmio
&&
2480 kvm_check_extension(s
, KVM_CAP_COALESCED_PIO
);
    /*
     * Enable the KVM dirty ring if supported, otherwise fall back to
     * dirty logging mode.
     */
    ret = kvm_dirty_ring_init(s);
    if (ret < 0) {
        goto err;
    }

    /*
     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty ring
     * is enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET assumes
     * that no page is write-protected initially, which conflicts with how
     * the dirty ring is used: the dirty ring requires all pages to be
     * write-protected at the very beginning.  Enabling this feature
     * together with the dirty ring causes data corruption.
     *
     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty
     * log, we may expect a higher stall time when starting migration.  In
     * the future we can enable KVM_CLEAR_DIRTY_LOG to work with the dirty
     * ring too: instead of clearing a dirty bit, it can be a way to
     * explicitly write-protect a page.
     */
    if (!s->kvm_dirty_ring_size) {
        dirty_log_manual_caps =
            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
        s->manual_dirty_log_protect = dirty_log_manual_caps;
        if (dirty_log_manual_caps) {
            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                    dirty_log_manual_caps);
            if (ret) {
                warn_report("Trying to enable capability %"PRIu64" of "
                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
                            "Falling back to the legacy mode.",
                            dirty_log_manual_caps);
                s->manual_dirty_log_protect = 0;
            }
        }
    }
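
    /*
     * Illustrative note, a sketch rather than upstream commentary: the
     * dirty ring probed above is opted into from the command line through
     * the "dirty-ring-size" accelerator property registered later in this
     * file, e.g.
     *
     *     qemu-system-x86_64 -accel kvm,dirty-ring-size=4096
     *
     * The value is the per-vCPU ring size in entries and must be a power
     * of two (enforced in kvm_set_dirty_ring_size() below); 0 keeps the
     * legacy dirty-bitmap path selected by this fallback code.
     */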
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

#ifdef KVM_CAP_SET_GUEST_DEBUG
    kvm_has_guest_debug =
        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
#endif

    kvm_sstep_flags = 0;
    if (kvm_has_guest_debug) {
        kvm_sstep_flags = SSTEP_ENABLE;

#if defined KVM_CAP_SET_GUEST_DEBUG2
        int guest_debug_flags =
            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);

        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
            kvm_sstep_flags |= SSTEP_NOIRQ;
        }
#endif
    }
    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
            ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0, "kvm-memory");
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    if (s->kvm_dirty_ring_size) {
        kvm_dirty_ring_reaper_init(s);
    }

    if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
        add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
                            query_stats_schemas_cb);
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->as);
    g_free(s->memory_listener.slots);

    return ret;
}
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}
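
/*
 * Illustrative note on the data layout, a sketch inferred from the callers
 * below rather than an upstream comment: for KVM_EXIT_IO the kernel stores
 * the I/O payload inside the shared kvm_run mapping itself, at
 * run->io.data_offset bytes from its start.  kvm_cpu_exec() therefore
 * passes (uint8_t *)run + run->io.data_offset to kvm_handle_io(), which
 * walks the buffer in run->io.size chunks, run->io.count times.
 */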
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    int i;

    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    for (i = 0; i < run->internal.ndata; ++i) {
        fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
                i, (uint64_t)run->internal.data[i]);
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (!s || s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

bool kvm_cpu_check_are_resettable(void)
{
    return kvm_arch_cpu_check_are_resettable();
}
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        int ret = kvm_arch_get_registers(cpu);
        if (ret) {
            error_report("Failed to get registers: %s", strerror(-ret));
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            vm_stop(RUN_STATE_INTERNAL_ERROR);
        }

        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    if (ret) {
        error_report("Failed to put registers after reset: %s",
                     strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    if (ret) {
        error_report("Failed to put registers after init: %s",
                     strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
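
/*
 * Summary sketch of the vcpu_dirty protocol implemented above (not part of
 * the upstream comments): do_kvm_cpu_synchronize_state() pulls registers
 * from KVM and marks the QEMU-side copy dirty; the post_reset/post_init
 * helpers push them back with KVM_PUT_RESET_STATE/KVM_PUT_FULL_STATE and
 * clear the flag; pre_loadvm only marks the copy dirty, so state loaded
 * from a snapshot is pushed lazily by the KVM_PUT_RUNTIME_STATE write at
 * the top of kvm_cpu_exec() below.
 */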
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}

static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    trace_kvm_cpu_exec();

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    bql_unlock();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            if (ret) {
                error_report("Failed to put registers after init: %s",
                             strerror(-ret));
                ret = -1;
                break;
            }

            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            trace_kvm_interrupt_exit_request();
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            bql_lock();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            bql_unlock();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                trace_kvm_io_window_exit();
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_DIRTY_RING_FULL:
            /*
             * We shouldn't continue if the dirty ring of this vcpu is
             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
            bql_lock();
            /*
             * We throttle the vCPU by making it sleep once it exits the
             * kernel due to a full dirty ring.  In the dirtylimit scenario,
             * reaping all vCPUs after a single vCPU's dirty ring fills up
             * would miss that sleep, so only reap the vCPU whose ring is
             * full.
             */
            if (dirtylimit_in_service()) {
                kvm_dirty_ring_reap(kvm_state, cpu);
            } else {
                kvm_dirty_ring_reap(kvm_state, NULL);
            }
            bql_unlock();
            dirtylimit_vcpu_execute(cpu);
            ret = 0;
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                bql_lock();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                bql_unlock();
                ret = 0;
                break;
            default:
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    bql_lock();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}
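
/*
 * Illustrative note, a sketch rather than an upstream comment: the loop
 * above runs for as long as exit handlers return 0.  EXCP_INTERRUPT hands
 * control back to the vCPU thread loop so the main loop can service
 * whatever interrupted KVM_RUN, and any negative value stops the VM in
 * RUN_STATE_INTERNAL_ERROR as seen right before the return.
 */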
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    accel_ioctl_begin();
    ret = ioctl(s->vmfd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    accel_cpu_ioctl_begin(cpu);
    ret = ioctl(cpu->kvm_fd, type, arg);
    accel_cpu_ioctl_end(cpu);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    accel_ioctl_begin();
    ret = ioctl(fd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
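
/*
 * Illustrative usage sketch (hypothetical values, not taken from this
 * file), assuming an ARM VGIC device fd obtained via kvm_create_device():
 *
 *     uint32_t nr_irqs;
 *     kvm_device_access(vgic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
 *                       &nr_irqs, false, &error_abort);
 *
 * The same helper performs writes when 'write' is true, which is why a
 * pointer is passed for both directions.
 */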
bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
        }
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}

bool kvm_supports_guest_debug(void)
{
    /* probed during kvm_init() */
    return kvm_has_guest_debug;
}
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_new(struct kvm_sw_breakpoint, 1);
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
/* Called asynchronously in VCPU thread. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread. */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
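
/*
 * Illustrative usage sketch (hypothetical id, not taken from this file):
 * ONE_REG ids encode architecture, register size and register number, e.g.
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | ... on AArch64 targets.
 *
 *     uint64_t val;
 *     if (!kvm_get_one_reg(cs, id, &val)) {
 *         // val now holds the current register contents
 *     }
 */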
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}
static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}
static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }

    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above. If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}

bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}
static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}

static char *kvm_get_device(Object *obj,
                            Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    return g_strdup(s->device);
}

static void kvm_set_device(Object *obj,
                           const char *value,
                           Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    g_free(s->device);
    s->device = g_strdup(value);
}
static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_with_bitmap = false;
    s->kvm_eager_split_size = 0;
    s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
    s->notify_window = 0;
    s->xen_version = 0;
    s->xen_gnttab_max_frames = 64;
    s->xen_evtchn_max_pirq = 256;
}

/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug. The
 * support is probed during kvm_init()
 */
static int kvm_gdbstub_sstep_flags(void)
{
    return kvm_sstep_flags;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
        NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
    object_class_property_set_description(oc, "device",
        "Path to the device node to use (default: /dev/kvm)");

    kvm_arch_accel_class_init(oc);
}
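
/*
 * Illustrative note, a sketch rather than an upstream comment: the
 * properties registered above surface as -accel suboptions, e.g.
 *
 *     -accel kvm,kernel-irqchip=split,dirty-ring-size=4096,device=/dev/kvm
 *
 * plus any architecture-specific properties contributed by
 * kvm_arch_accel_class_init().
 */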
static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);

typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;
static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;
    int i;

    /* Only add stats that we understand. */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}
static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}
/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident; /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);

/*
 * Return the descriptors for 'target', that either have already been read
 * or are retrieved from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zu",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zu",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}
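
/*
 * Illustrative layout sketch (inferred from the pread() offsets above, not
 * an upstream diagram) of a KVM binary stats file descriptor:
 *
 *   offset 0                   struct kvm_stats_header
 *   header.desc_offset         num_desc descriptors, each occupying
 *                              sizeof(struct kvm_stats_desc) + name_size
 *   header.data_offset         u64 values; each descriptor owns
 *                              pdesc->size entries starting at pdesc->offset
 *
 * This is why the readers use pread() with explicit offsets rather than
 * sequential read()s.
 */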
static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size; read schema data */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zu", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}
static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size; read schema data */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
                       kvm_stats_args->errp);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}
void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}