/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"

#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#define PAGE_SIZE qemu_real_host_page_size

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

enum KVMDirtyRingReaperState {
    KVM_DIRTY_RING_REAPER_NONE = 0,
    /* The reaper is sleeping */
    KVM_DIRTY_RING_REAPER_WAIT,
    /* The reaper is reaping for dirty pages */
    KVM_DIRTY_RING_REAPER_REAPING,
};

/*
 * KVM reaper instance, responsible for collecting the KVM dirty bits
 * via the dirty ring.
 */
struct KVMDirtyRingReaper {
    /* The reaper thread */
    QemuThread reaper_thr;
    volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
    volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
};

struct KVMState
{
    AccelState parent_obj;

    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    int coalesced_pio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
    int max_nested_state_len;
    int intx_set_mask;
    bool kernel_irqchip_allowed;
    bool kernel_irqchip_required;
    OnOffAuto kernel_irqchip_split;
    uint64_t manual_dirty_log_protect;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
    GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    unsigned long *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
    KVMMemoryListener memory_listener;
    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;

    /* For "info mtree -f" to tell if an MR is registered in KVM */
    int nr_as;
    struct KVMAs {
        KVMMemoryListener *ml;
        AddressSpace *as;
    } *as;
    uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
    uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
    struct KVMDirtyRingReaper reaper;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
bool kvm_has_guest_debug;
int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

/* Please use kvm_slots_lock/unlock to protect the access of the kml slots */
#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}
int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

bool kvm_has_free_slot(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    bool result;
    KVMMemoryListener *kml = &s->memory_listener;

    kvm_slots_lock();
    result = !!kvm_get_free_slot(kml);
    kvm_slots_unlock();

    return result;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size);
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask;
}
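
/*
 * Worked example (a sketch, assuming 4 KiB host pages): a section with
 * offset_within_address_space = 0x1234 and size = 0x3000 yields
 * aligned = 0x2000 and delta = 0xdcc, so the function stores 0x2000 into
 * *start and returns (0x3000 - 0xdcc) & ~0xfff = 0x2000; the partial
 * pages at both ends simply never end up in a KVM slot.
 */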
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                              mem.memory_size, mem.userspace_addr, ret);
    if (ret < 0) {
        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                     __func__, mem.slot, slot->start_addr,
                     (uint64_t)mem.memory_size, strerror(errno));
    }
    return ret;
}
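
/*
 * Note on the slot id encoding used above: KVM identifies a memslot by a
 * single 32-bit number, so QEMU packs the address-space id into the upper
 * 16 bits.  For example, slot 3 of address space 1 is passed to the
 * kernel as (3 | (1 << 16)) == 0x10003.
 */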
static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    DPRINTF("kvm_destroy_vcpu\n");

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);

err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
err:
    return ret;
}
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * too, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
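
/*
 * Worked example (a sketch): a 1 GiB slot with 4 KiB host pages covers
 * 262144 pages, so bitmap_size = ALIGN(262144, 64) / 8 = 32768 bytes;
 * one bit per page, rounded up to a multiple of 64 bits so that a 64-bit
 * kernel and a 32-bit userspace agree on the buffer length.
 */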
/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size)) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    gfn->flags = KVM_DIRTY_GFN_F_RESET;
}

/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty page we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}
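
/*
 * Sketch of the per-vcpu ring protocol implemented above: the kernel
 * producer flags an entry KVM_DIRTY_GFN_F_DIRTY; userspace consumes
 * entries starting at its private kvm_fetch_index, marks each collected
 * entry KVM_DIRTY_GFN_F_RESET, and stops at the first entry that is not
 * dirty.  The resets only take effect once KVM_RESET_DIRTY_RINGS is
 * issued, which kvm_dirty_ring_reap_locked() below does for all vcpus
 * at once.
 */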
/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s)
{
    int ret;
    CPUState *cpu;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    CPU_FOREACH(cpu) {
        total += kvm_dirty_ring_reap_one(s, cpu);
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider to drop the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protect those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(qemu_mutex_iothread_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
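
/*
 * Sketch of the arithmetic: with 4 KiB host pages, KVM_CLEAR_LOG_ALIGN
 * is 4096 << 6 = 256 KiB (the 64-page granule the ioctl requires), and
 * KVM_CLEAR_LOG_MASK rounds a byte offset down to that granule.
 */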
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
        goto out;
    }
    ret = 0;
    trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
out:
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
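
/*
 * Worked example (a sketch, 4 KiB pages): a request to clear
 * start = 0x41000, size = 0x4000 in a large slot is widened to the
 * 64-page granule: bmap_start becomes page 0x40, start_delta = 1 page,
 * bmap_npages = 64.  That takes the slow path: 5 bits are copied out of
 * dirty_bmap, the one-page hole at the front is zeroed, and
 * KVM_CLEAR_DIRTY_LOG is issued with first_page = 0x40, num_pages = 64.
 */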
/*
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static MemoryListener kvm_coalesced_pio_listener = {
    .name = "kvm-coalesced-pio",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
};
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
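
/*
 * Illustration (a sketch): when host and target endianness differ, a
 * 2-byte datamatch value 0x1234 must be registered with the kernel as
 * bswap16(0x1234) == 0x3412, so the in-kernel comparison sees the bytes
 * exactly as the guest writes them.
 */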
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;

    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writeable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    kvm_slots_lock();

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                goto out;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state);
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
        } while (size);
        goto out;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }
        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
    } while (size);

out:
    kvm_slots_unlock();
}
static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        qemu_mutex_lock_iothread();
        kvm_dirty_ring_reap(s);
        qemu_mutex_unlock_iothread();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static int kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);

    return 0;
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    memory_region_ref(section->mr);
    kvm_set_phys_mem(kml, section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_set_phys_mem(kml, section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);
            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = 10;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}
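
/*
 * Note: besides the "kvm-memory" listener registered from kvm_init()
 * below, callers can register further listeners for other address spaces
 * (e.g. the x86 code adds one for SMRAM with as_id 1), which is why slot
 * ids are tagged with (as_id << 16) when talking to the kernel.
 */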
static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
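
/*
 * Sketch of a typical caller: a machine model pulses a GSI with
 * kvm_set_irq(s, gsi, 1) followed by kvm_set_irq(s, gsi, 0).  When the
 * kernel supports KVM_IRQ_LINE_STATUS, the returned event.status also
 * reports whether the interrupt was delivered or coalesced with a
 * pending one.
 */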
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!kvm_direct_msi_allowed) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}
static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /*
     * PIC and IOAPIC share the first 16 GSI numbers, thus the available
     * GSI numbers are more than the number of IRQ route. Allocating a GSI
     * number can succeed even though a new route entry cannot be added.
     * When this happens, flush dynamic MSI entries to free IRQ route entries.
     */
    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
        kvm_flush_dynamic_msi_routes(s);
    }

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (kvm_direct_msi_allowed) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc0(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);
        kvm_irqchip_commit_routes(s);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                    vector, virq);

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    kvm_irqchip_commit_routes(s);

    return virq;
}
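
/*
 * Sketch of the usual pairing: the virq returned here is typically wired
 * to an EventNotifier via kvm_irqchip_add_irqfd_notifier_gsi() so the MSI
 * fires entirely in the kernel, and is handed back with
 * kvm_irqchip_release_virq() when the vector is masked or freed.
 */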
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}
static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in the
             * userspace, KVM kernel resamplefd will not work because
             * the EOI of the interrupt will be delivered to userspace
             * instead, so the KVM kernel resamplefd kick will be
             * skipped.  The userspace here mimics what the kernel
             * provides with resamplefd, remember the resamplefd and
             * kick it when we receive EOI of this IRQ.
             *
             * This is hackery because IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However
             * this can bring much performance back for split irqchip
             * with INTx IRQs (for VFIO, this gives 93% perf of the
             * full fast path, which is 46% perf boost comparing to
             * the INTx slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
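
/*
 * Sketch of the split-irqchip resample flow handled above:
 * (1) the device asserts INTx and the irqfd injects the level IRQ in the
 *     kernel; (2) the guest EOI lands in the userspace IOAPIC model,
 *     which calls kvm_resample_fd_notify(gsi); (3) event_notifier_set()
 *     on the recorded resample_event lets the producer (e.g. VFIO)
 *     unmask and re-arm the interrupt.
 */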
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}

#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    /* First probe and see if there's a arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            perror("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

bool kvm_dirty_ring_enabled(void)
{
    return kvm_state->kvm_dirty_ring_size ? true : false;
}
2312 static int kvm_init(MachineState
*ms
)
2314 MachineClass
*mc
= MACHINE_GET_CLASS(ms
);
2315 static const char upgrade_note
[] =
2316 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2317 "(see http://sourceforge.net/projects/kvm).\n";
2322 { "SMP", ms
->smp
.cpus
},
2323 { "hotpluggable", ms
->smp
.max_cpus
},
2326 int soft_vcpus_limit
, hard_vcpus_limit
;
2328 const KVMCapabilityInfo
*missing_cap
;
2331 uint64_t dirty_log_manual_caps
;
2333 qemu_mutex_init(&kml_slots_lock
);
2335 s
= KVM_STATE(ms
->accelerator
);
2338 * On systems where the kernel can support different base page
2339 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2340 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2341 * page size for the system though.
2343 assert(TARGET_PAGE_SIZE
<= qemu_real_host_page_size
);
2347 #ifdef KVM_CAP_SET_GUEST_DEBUG
2348 QTAILQ_INIT(&s
->kvm_sw_breakpoints
);
2350 QLIST_INIT(&s
->kvm_parked_vcpus
);
2351 s
->fd
= qemu_open_old("/dev/kvm", O_RDWR
);
2353 fprintf(stderr
, "Could not access KVM kernel module: %m\n");
2358 ret
= kvm_ioctl(s
, KVM_GET_API_VERSION
, 0);
2359 if (ret
< KVM_API_VERSION
) {
2363 fprintf(stderr
, "kvm version too old\n");
2367 if (ret
> KVM_API_VERSION
) {
2369 fprintf(stderr
, "kvm version not supported\n");
2373 kvm_immediate_exit
= kvm_check_extension(s
, KVM_CAP_IMMEDIATE_EXIT
);
2374 s
->nr_slots
= kvm_check_extension(s
, KVM_CAP_NR_MEMSLOTS
);
2376 /* If unspecified, use the default value */
2381 s
->nr_as
= kvm_check_extension(s
, KVM_CAP_MULTI_ADDRESS_SPACE
);
2382 if (s
->nr_as
<= 1) {
2385 s
->as
= g_new0(struct KVMAs
, s
->nr_as
);
2387 if (object_property_find(OBJECT(current_machine
), "kvm-type")) {
2388 g_autofree
char *kvm_type
= object_property_get_str(OBJECT(current_machine
),
2391 type
= mc
->kvm_type(ms
, kvm_type
);
2392 } else if (mc
->kvm_type
) {
2393 type
= mc
->kvm_type(ms
, NULL
);
2397 ret
= kvm_ioctl(s
, KVM_CREATE_VM
, type
);
2398 } while (ret
== -EINTR
);
2401 fprintf(stderr
, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret
,
2405 if (ret
== -EINVAL
) {
2407 "Host kernel setup problem detected. Please verify:\n");
2408 fprintf(stderr
, "- for kernels supporting the switch_amode or"
2409 " user_mode parameters, whether\n");
2411 " user space is running in primary address space\n");
2413 "- for kernels supporting the vm.allocate_pgste sysctl, "
2414 "whether it is enabled\n");
2416 #elif defined(TARGET_PPC)
2417 if (ret
== -EINVAL
) {
2419 "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2420 (type
== 2) ? "pr" : "hv");
2428 /* check the vcpu limits */
2429 soft_vcpus_limit
= kvm_recommended_vcpus(s
);
2430 hard_vcpus_limit
= kvm_max_vcpus(s
);
2433 if (nc
->num
> soft_vcpus_limit
) {
2434 warn_report("Number of %s cpus requested (%d) exceeds "
2435 "the recommended cpus supported by KVM (%d)",
2436 nc
->name
, nc
->num
, soft_vcpus_limit
);
2438 if (nc
->num
> hard_vcpus_limit
) {
2439 fprintf(stderr
, "Number of %s cpus requested (%d) exceeds "
2440 "the maximum cpus supported by KVM (%d)\n",
2441 nc
->name
, nc
->num
, hard_vcpus_limit
);
2448 missing_cap
= kvm_check_extension_list(s
, kvm_required_capabilites
);
2451 kvm_check_extension_list(s
, kvm_arch_required_capabilities
);
2455 fprintf(stderr
, "kvm does not support %s\n%s",
2456 missing_cap
->name
, upgrade_note
);
2460 s
->coalesced_mmio
= kvm_check_extension(s
, KVM_CAP_COALESCED_MMIO
);
2461 s
->coalesced_pio
= s
->coalesced_mmio
&&
2462 kvm_check_extension(s
, KVM_CAP_COALESCED_PIO
);
2465 * Enable KVM dirty ring if supported, otherwise fall back to
2466 * dirty logging mode
2468 if (s
->kvm_dirty_ring_size
> 0) {
2469 uint64_t ring_bytes
;
2471 ring_bytes
= s
->kvm_dirty_ring_size
* sizeof(struct kvm_dirty_gfn
);
2473 /* Read the max supported pages */
2474 ret
= kvm_vm_check_extension(s
, KVM_CAP_DIRTY_LOG_RING
);
2476 if (ring_bytes
> ret
) {
2477 error_report("KVM dirty ring size %" PRIu32
" too big "
2478 "(maximum is %ld). Please use a smaller value.",
2479 s
->kvm_dirty_ring_size
,
2480 (long)ret
/ sizeof(struct kvm_dirty_gfn
));
2485 ret
= kvm_vm_enable_cap(s
, KVM_CAP_DIRTY_LOG_RING
, 0, ring_bytes
);
2487 error_report("Enabling of KVM dirty ring failed: %s. "
2488 "Suggested minimum value is 1024.", strerror(-ret
));
2492 s
->kvm_dirty_ring_bytes
= ring_bytes
;
2494 warn_report("KVM dirty ring not available, using bitmap method");
2495 s
->kvm_dirty_ring_size
= 0;
    /*
     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
     * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
     * page is wr-protected initially, which is against how the kvm dirty ring
     * is used - the kvm dirty ring requires all pages to be wr-protected at
     * the very beginning.  Enabling this feature for dirty ring causes data
     * corruption.
     *
     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
     * we may expect a higher stall time when starting the migration.  In the
     * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
     * instead of clearing dirty bit, it can be a way to explicitly wr-protect
     * the page.
     */
    if (!s->kvm_dirty_ring_size) {
        dirty_log_manual_caps =
            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
        s->manual_dirty_log_protect = dirty_log_manual_caps;
        if (dirty_log_manual_caps) {
            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                    dirty_log_manual_caps);
            if (ret) {
                warn_report("Trying to enable capability %"PRIu64" of "
                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
                            "Falling back to the legacy mode.",
                            dirty_log_manual_caps);
                s->manual_dirty_log_protect = 0;
            }
        }
    }
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

#ifdef KVM_CAP_IRQ_ROUTING
    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    kvm_irqfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

    kvm_ioeventfd_any_length_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);

#ifdef KVM_CAP_SET_GUEST_DEBUG
    kvm_has_guest_debug =
        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
#endif

    kvm_sstep_flags = 0;
    if (kvm_has_guest_debug) {
        kvm_sstep_flags = SSTEP_ENABLE;

#if defined KVM_CAP_SET_GUEST_DEBUG2
        int guest_debug_flags =
            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);

        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
            kvm_sstep_flags |= SSTEP_NOIRQ;
        }
#endif
    }
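
    /*
     * Reminder on KVM_CHECK_EXTENSION semantics (general KVM API behaviour,
     * not specific to this function): 0 means unsupported, and a positive
     * return means supported, where for some capabilities the value itself
     * carries information - e.g. KVM_CAP_NR_MEMSLOTS returns the slot count
     * and KVM_CAP_SET_GUEST_DEBUG2 above returns a flag mask.
     */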
    kvm_state = s;

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
                                  ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    if (kvm_eventfds_allowed) {
        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    }
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0, "kvm-memory");
    if (kvm_eventfds_allowed) {
        memory_listener_register(&kvm_io_listener,
                                 &address_space_io);
    }
    memory_listener_register(&kvm_coalesced_pio_listener,
                             &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    if (s->kvm_dirty_ring_size) {
        ret = kvm_dirty_ring_reaper_init(s);
        if (ret) {
            goto err;
        }
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->memory_listener.slots);

    return ret;
}
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}
static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}
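
/*
 * A count > 1 above corresponds to string/repeated port I/O (e.g. x86
 * REP INS/OUTS): KVM hands us a single exit describing 'count'
 * back-to-back accesses of 'size' bytes each, packed contiguously after
 * run->io.data_offset, which is why the loop advances ptr by size.
 */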
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
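
/*
 * Ring protocol sketch (as documented for KVM_CAP_COALESCED_MMIO): the
 * kernel is the producer and advances ring->last when it logs a deferred
 * write; QEMU is the sole consumer and advances ring->first, so the
 * entries replayed above are exactly those in [first, last).
 */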
bool kvm_cpu_check_are_resettable(void)
{
    return kvm_arch_cpu_check_are_resettable();
}
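
/*
 * The synchronize helpers below implement lazy register synchronization
 * around cpu->vcpu_dirty: when the flag is set, QEMU's CPUState holds the
 * authoritative copy and must be pushed with kvm_arch_put_registers()
 * before the next KVM_RUN; when it is clear, the in-kernel copy is
 * authoritative and must be fetched with kvm_arch_get_registers() before
 * QEMU inspects or modifies the state.
 */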
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif
static void kvm_cpu_kick(CPUState *cpu)
{
    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}
static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}
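
/*
 * Summary of the two kick mechanisms handled above: with
 * KVM_CAP_IMMEDIATE_EXIT the kicker stores 1 to kvm_run->immediate_exit
 * and KVM_RUN returns -EINTR without any signal being delivered, so
 * "eating signals" reduces to clearing that flag; on older kernels a
 * pending SIG_IPI interrupts KVM_RUN instead and must be drained with
 * sigtimedwait() so it cannot fire again inside the next KVM_RUN.
 */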
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            qemu_mutex_lock_iothread();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            qemu_mutex_unlock_iothread();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_DIRTY_RING_FULL:
            /*
             * We shouldn't continue if the dirty ring of this vcpu is
             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
            qemu_mutex_lock_iothread();
            kvm_dirty_ring_reap(kvm_state);
            qemu_mutex_unlock_iothread();
            ret = 0;
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}
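
/*
 * The four variadic wrappers below funnel every KVM ioctl through one
 * place so each call can be traced; they differ only in which fd of the
 * /dev/kvm -> VM -> vcpu/device hierarchy they target, and (as
 * reconstructed here) each converts an ioctl failure into -errno rather
 * than returning -1.
 */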
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
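
/*
 * Hypothetical usage sketch for kvm_device_access() - the group/attr
 * values are device-specific and only illustrative here:
 *
 *     uint64_t dist_addr = 0x8000000;
 *     kvm_device_access(dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                       KVM_VGIC_V2_ADDR_TYPE_DIST, &dist_addr,
 *                       true, &error_abort);
 *
 * i.e. "write attribute (group, attr) on this device fd, taking the
 * value from *val", with failures turned into a populated Error object.
 */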
bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
        }
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}
void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
/* Called asynchronously in VCPU thread. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}
/* Called synchronously (via signalfd) in main thread. */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}
bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
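
/*
 * Illustrative ONE_REG usage (the register id encoding is architecture
 * specific, so 'id' is left abstract here): a 64-bit register whose id
 * carries KVM_REG_SIZE_U64 can be read with
 *
 *     uint64_t val;
 *     if (!kvm_get_one_reg(cs, id, &val)) {
 *         // use val
 *     }
 *
 * KVM copies through reg.addr exactly the number of bytes encoded in the
 * id's KVM_REG_SIZE field.
 */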
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}
static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}
static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}
static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }

    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above. If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}
bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}
static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}
static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    visit_type_uint32(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}
static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
        NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
}
static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);