4 * Copyright IBM, Corp. 2008
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Glauber Costa <gcosta@redhat.com>
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
19 #include <linux/kvm.h>
21 #include "qemu/atomic.h"
22 #include "qemu/option.h"
23 #include "qemu/config-file.h"
24 #include "qemu/error-report.h"
25 #include "qapi/error.h"
26 #include "hw/pci/msi.h"
27 #include "hw/pci/msix.h"
28 #include "hw/s390x/adapter.h"
29 #include "exec/gdbstub.h"
30 #include "sysemu/kvm_int.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/cpus.h"
33 #include "sysemu/sysemu.h"
34 #include "qemu/bswap.h"
35 #include "exec/memory.h"
36 #include "exec/ram_addr.h"
37 #include "exec/address-spaces.h"
38 #include "qemu/event_notifier.h"
39 #include "qemu/main-loop.h"
42 #include "sysemu/sev.h"
43 #include "qapi/visitor.h"
44 #include "qapi/qapi-types-common.h"
45 #include "qapi/qapi-visit-common.h"
46 #include "sysemu/reset.h"
47 #include "qemu/guest-random.h"
48 #include "sysemu/hw_accel.h"
51 #include "hw/boards.h"
53 /* This check must be after config-host.h is included */
55 #include <sys/eventfd.h>
58 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
59 * need to use the real host PAGE_SIZE, as that's what KVM will use.
64 #define PAGE_SIZE qemu_real_host_page_size
69 #define DPRINTF(fmt, ...) \
70 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
72 #define DPRINTF(fmt, ...) \
76 #define KVM_MSI_HASHTAB_SIZE 256
78 struct KVMParkedVcpu
{
79 unsigned long vcpu_id
;
81 QLIST_ENTRY(KVMParkedVcpu
) node
;
86 AccelState parent_obj
;
93 struct kvm_coalesced_mmio_ring
*coalesced_mmio_ring
;
94 bool coalesced_flush_in_progress
;
96 int robust_singlestep
;
98 #ifdef KVM_CAP_SET_GUEST_DEBUG
99 QTAILQ_HEAD(, kvm_sw_breakpoint
) kvm_sw_breakpoints
;
101 int max_nested_state_len
;
105 bool kernel_irqchip_allowed
;
106 bool kernel_irqchip_required
;
107 OnOffAuto kernel_irqchip_split
;
109 uint64_t manual_dirty_log_protect
;
110 /* The man page (and posix) say ioctl numbers are signed int, but
111 * they're not. Linux, glibc and *BSD all treat ioctl numbers as
112 * unsigned, and treating them as signed here can break things */
113 unsigned irq_set_ioctl
;
114 unsigned int sigmask_len
;
116 #ifdef KVM_CAP_IRQ_ROUTING
117 struct kvm_irq_routing
*irq_routes
;
118 int nr_allocated_irq_routes
;
119 unsigned long *used_gsi_bitmap
;
120 unsigned int gsi_count
;
121 QTAILQ_HEAD(, KVMMSIRoute
) msi_hashtab
[KVM_MSI_HASHTAB_SIZE
];
123 KVMMemoryListener memory_listener
;
124 QLIST_HEAD(, KVMParkedVcpu
) kvm_parked_vcpus
;
126 /* memory encryption */
127 void *memcrypt_handle
;
128 int (*memcrypt_encrypt_data
)(void *handle
, uint8_t *ptr
, uint64_t len
);
130 /* For "info mtree -f" to tell if an MR is registered in KVM */
133 KVMMemoryListener
*ml
;
139 bool kvm_kernel_irqchip
;
140 bool kvm_split_irqchip
;
141 bool kvm_async_interrupts_allowed
;
142 bool kvm_halt_in_kernel_allowed
;
143 bool kvm_eventfds_allowed
;
144 bool kvm_irqfds_allowed
;
145 bool kvm_resamplefds_allowed
;
146 bool kvm_msi_via_irqfd_allowed
;
147 bool kvm_gsi_routing_allowed
;
148 bool kvm_gsi_direct_mapping
;
150 bool kvm_readonly_mem_allowed
;
151 bool kvm_vm_attributes_allowed
;
152 bool kvm_direct_msi_allowed
;
153 bool kvm_ioeventfd_any_length_allowed
;
154 bool kvm_msi_use_devid
;
155 static bool kvm_immediate_exit
;
156 static hwaddr kvm_max_slot_size
= ~0;
158 static const KVMCapabilityInfo kvm_required_capabilites
[] = {
159 KVM_CAP_INFO(USER_MEMORY
),
160 KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS
),
161 KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS
),
165 static NotifierList kvm_irqchip_change_notifiers
=
166 NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers
);
168 struct KVMResampleFd
{
170 EventNotifier
*resample_event
;
171 QLIST_ENTRY(KVMResampleFd
) node
;
173 typedef struct KVMResampleFd KVMResampleFd
;
176 * Only used with split irqchip where we need to do the resample fd
177 * kick for the kernel from userspace.
179 static QLIST_HEAD(, KVMResampleFd
) kvm_resample_fd_list
=
180 QLIST_HEAD_INITIALIZER(kvm_resample_fd_list
);
182 #define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
183 #define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
185 static inline void kvm_resample_fd_remove(int gsi
)
189 QLIST_FOREACH(rfd
, &kvm_resample_fd_list
, node
) {
190 if (rfd
->gsi
== gsi
) {
191 QLIST_REMOVE(rfd
, node
);
198 static inline void kvm_resample_fd_insert(int gsi
, EventNotifier
*event
)
200 KVMResampleFd
*rfd
= g_new0(KVMResampleFd
, 1);
203 rfd
->resample_event
= event
;
205 QLIST_INSERT_HEAD(&kvm_resample_fd_list
, rfd
, node
);
208 void kvm_resample_fd_notify(int gsi
)
212 QLIST_FOREACH(rfd
, &kvm_resample_fd_list
, node
) {
213 if (rfd
->gsi
== gsi
) {
214 event_notifier_set(rfd
->resample_event
);
215 trace_kvm_resample_fd_notify(gsi
);
221 int kvm_get_max_memslots(void)
223 KVMState
*s
= KVM_STATE(current_accel());
228 bool kvm_memcrypt_enabled(void)
230 if (kvm_state
&& kvm_state
->memcrypt_handle
) {
237 int kvm_memcrypt_encrypt_data(uint8_t *ptr
, uint64_t len
)
239 if (kvm_state
->memcrypt_handle
&&
240 kvm_state
->memcrypt_encrypt_data
) {
241 return kvm_state
->memcrypt_encrypt_data(kvm_state
->memcrypt_handle
,
248 /* Called with KVMMemoryListener.slots_lock held */
249 static KVMSlot
*kvm_get_free_slot(KVMMemoryListener
*kml
)
251 KVMState
*s
= kvm_state
;
254 for (i
= 0; i
< s
->nr_slots
; i
++) {
255 if (kml
->slots
[i
].memory_size
== 0) {
256 return &kml
->slots
[i
];
263 bool kvm_has_free_slot(MachineState
*ms
)
265 KVMState
*s
= KVM_STATE(ms
->accelerator
);
267 KVMMemoryListener
*kml
= &s
->memory_listener
;
270 result
= !!kvm_get_free_slot(kml
);
271 kvm_slots_unlock(kml
);
276 /* Called with KVMMemoryListener.slots_lock held */
277 static KVMSlot
*kvm_alloc_slot(KVMMemoryListener
*kml
)
279 KVMSlot
*slot
= kvm_get_free_slot(kml
);
285 fprintf(stderr
, "%s: no free slot available\n", __func__
);
289 static KVMSlot
*kvm_lookup_matching_slot(KVMMemoryListener
*kml
,
293 KVMState
*s
= kvm_state
;
296 for (i
= 0; i
< s
->nr_slots
; i
++) {
297 KVMSlot
*mem
= &kml
->slots
[i
];
299 if (start_addr
== mem
->start_addr
&& size
== mem
->memory_size
) {
308 * Calculate and align the start address and the size of the section.
309 * Return the size. If the size is 0, the aligned section is empty.
311 static hwaddr
kvm_align_section(MemoryRegionSection
*section
,
314 hwaddr size
= int128_get64(section
->size
);
315 hwaddr delta
, aligned
;
317 /* kvm works in page size chunks, but the function may be called
318 with sub-page size and unaligned start address. Pad the start
319 address to next and truncate size to previous page boundary. */
320 aligned
= ROUND_UP(section
->offset_within_address_space
,
321 qemu_real_host_page_size
);
322 delta
= aligned
- section
->offset_within_address_space
;
328 return (size
- delta
) & qemu_real_host_page_mask
;
331 int kvm_physical_memory_addr_from_host(KVMState
*s
, void *ram
,
334 KVMMemoryListener
*kml
= &s
->memory_listener
;
338 for (i
= 0; i
< s
->nr_slots
; i
++) {
339 KVMSlot
*mem
= &kml
->slots
[i
];
341 if (ram
>= mem
->ram
&& ram
< mem
->ram
+ mem
->memory_size
) {
342 *phys_addr
= mem
->start_addr
+ (ram
- mem
->ram
);
347 kvm_slots_unlock(kml
);
352 static int kvm_set_user_memory_region(KVMMemoryListener
*kml
, KVMSlot
*slot
, bool new)
354 KVMState
*s
= kvm_state
;
355 struct kvm_userspace_memory_region mem
;
358 mem
.slot
= slot
->slot
| (kml
->as_id
<< 16);
359 mem
.guest_phys_addr
= slot
->start_addr
;
360 mem
.userspace_addr
= (unsigned long)slot
->ram
;
361 mem
.flags
= slot
->flags
;
363 if (slot
->memory_size
&& !new && (mem
.flags
^ slot
->old_flags
) & KVM_MEM_READONLY
) {
364 /* Set the slot size to 0 before setting the slot to the desired
365 * value. This is needed based on KVM commit 75d61fbc. */
367 ret
= kvm_vm_ioctl(s
, KVM_SET_USER_MEMORY_REGION
, &mem
);
372 mem
.memory_size
= slot
->memory_size
;
373 ret
= kvm_vm_ioctl(s
, KVM_SET_USER_MEMORY_REGION
, &mem
);
374 slot
->old_flags
= mem
.flags
;
376 trace_kvm_set_user_memory(mem
.slot
, mem
.flags
, mem
.guest_phys_addr
,
377 mem
.memory_size
, mem
.userspace_addr
, ret
);
379 error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
380 " start=0x%" PRIx64
", size=0x%" PRIx64
": %s",
381 __func__
, mem
.slot
, slot
->start_addr
,
382 (uint64_t)mem
.memory_size
, strerror(errno
));
387 static int do_kvm_destroy_vcpu(CPUState
*cpu
)
389 KVMState
*s
= kvm_state
;
391 struct KVMParkedVcpu
*vcpu
= NULL
;
394 DPRINTF("kvm_destroy_vcpu\n");
396 ret
= kvm_arch_destroy_vcpu(cpu
);
401 mmap_size
= kvm_ioctl(s
, KVM_GET_VCPU_MMAP_SIZE
, 0);
404 DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
408 ret
= munmap(cpu
->kvm_run
, mmap_size
);
413 vcpu
= g_malloc0(sizeof(*vcpu
));
414 vcpu
->vcpu_id
= kvm_arch_vcpu_id(cpu
);
415 vcpu
->kvm_fd
= cpu
->kvm_fd
;
416 QLIST_INSERT_HEAD(&kvm_state
->kvm_parked_vcpus
, vcpu
, node
);
421 void kvm_destroy_vcpu(CPUState
*cpu
)
423 if (do_kvm_destroy_vcpu(cpu
) < 0) {
424 error_report("kvm_destroy_vcpu failed");
429 static int kvm_get_vcpu(KVMState
*s
, unsigned long vcpu_id
)
431 struct KVMParkedVcpu
*cpu
;
433 QLIST_FOREACH(cpu
, &s
->kvm_parked_vcpus
, node
) {
434 if (cpu
->vcpu_id
== vcpu_id
) {
437 QLIST_REMOVE(cpu
, node
);
438 kvm_fd
= cpu
->kvm_fd
;
444 return kvm_vm_ioctl(s
, KVM_CREATE_VCPU
, (void *)vcpu_id
);
447 int kvm_init_vcpu(CPUState
*cpu
, Error
**errp
)
449 KVMState
*s
= kvm_state
;
453 trace_kvm_init_vcpu(cpu
->cpu_index
, kvm_arch_vcpu_id(cpu
));
455 ret
= kvm_get_vcpu(s
, kvm_arch_vcpu_id(cpu
));
457 error_setg_errno(errp
, -ret
, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
458 kvm_arch_vcpu_id(cpu
));
464 cpu
->vcpu_dirty
= true;
466 mmap_size
= kvm_ioctl(s
, KVM_GET_VCPU_MMAP_SIZE
, 0);
469 error_setg_errno(errp
, -mmap_size
,
470 "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
474 cpu
->kvm_run
= mmap(NULL
, mmap_size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
476 if (cpu
->kvm_run
== MAP_FAILED
) {
478 error_setg_errno(errp
, ret
,
479 "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
480 kvm_arch_vcpu_id(cpu
));
484 if (s
->coalesced_mmio
&& !s
->coalesced_mmio_ring
) {
485 s
->coalesced_mmio_ring
=
486 (void *)cpu
->kvm_run
+ s
->coalesced_mmio
* PAGE_SIZE
;
489 ret
= kvm_arch_init_vcpu(cpu
);
491 error_setg_errno(errp
, -ret
,
492 "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
493 kvm_arch_vcpu_id(cpu
));
500 * dirty pages logging control
503 static int kvm_mem_flags(MemoryRegion
*mr
)
505 bool readonly
= mr
->readonly
|| memory_region_is_romd(mr
);
508 if (memory_region_get_dirty_log_mask(mr
) != 0) {
509 flags
|= KVM_MEM_LOG_DIRTY_PAGES
;
511 if (readonly
&& kvm_readonly_mem_allowed
) {
512 flags
|= KVM_MEM_READONLY
;
517 /* Called with KVMMemoryListener.slots_lock held */
518 static int kvm_slot_update_flags(KVMMemoryListener
*kml
, KVMSlot
*mem
,
521 mem
->flags
= kvm_mem_flags(mr
);
523 /* If nothing changed effectively, no need to issue ioctl */
524 if (mem
->flags
== mem
->old_flags
) {
528 return kvm_set_user_memory_region(kml
, mem
, false);
531 static int kvm_section_update_flags(KVMMemoryListener
*kml
,
532 MemoryRegionSection
*section
)
534 hwaddr start_addr
, size
, slot_size
;
538 size
= kvm_align_section(section
, &start_addr
);
545 while (size
&& !ret
) {
546 slot_size
= MIN(kvm_max_slot_size
, size
);
547 mem
= kvm_lookup_matching_slot(kml
, start_addr
, slot_size
);
549 /* We don't have a slot if we want to trap every access. */
553 ret
= kvm_slot_update_flags(kml
, mem
, section
->mr
);
554 start_addr
+= slot_size
;
559 kvm_slots_unlock(kml
);
563 static void kvm_log_start(MemoryListener
*listener
,
564 MemoryRegionSection
*section
,
567 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
574 r
= kvm_section_update_flags(kml
, section
);
580 static void kvm_log_stop(MemoryListener
*listener
,
581 MemoryRegionSection
*section
,
584 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
591 r
= kvm_section_update_flags(kml
, section
);
597 /* get kvm's dirty pages bitmap and update qemu's */
598 static int kvm_get_dirty_pages_log_range(MemoryRegionSection
*section
,
599 unsigned long *bitmap
)
601 ram_addr_t start
= section
->offset_within_region
+
602 memory_region_get_ram_addr(section
->mr
);
603 ram_addr_t pages
= int128_get64(section
->size
) / qemu_real_host_page_size
;
605 cpu_physical_memory_set_dirty_lebitmap(bitmap
, start
, pages
);
609 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
611 /* Allocate the dirty bitmap for a slot */
612 static void kvm_memslot_init_dirty_bitmap(KVMSlot
*mem
)
615 * XXX bad kernel interface alert
616 * For dirty bitmap, kernel allocates array of size aligned to
617 * bits-per-long. But for case when the kernel is 64bits and
618 * the userspace is 32bits, userspace can't align to the same
619 * bits-per-long, since sizeof(long) is different between kernel
620 * and user space. This way, userspace will provide buffer which
621 * may be 4 bytes less than the kernel will use, resulting in
622 * userspace memory corruption (which is not detectable by valgrind
623 * too, in most cases).
624 * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
625 * a hope that sizeof(long) won't become >8 any time soon.
627 hwaddr bitmap_size
= ALIGN(((mem
->memory_size
) >> TARGET_PAGE_BITS
),
628 /*HOST_LONG_BITS*/ 64) / 8;
629 mem
->dirty_bmap
= g_malloc0(bitmap_size
);
633 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
635 * This function will first try to fetch dirty bitmap from the kernel,
636 * and then updates qemu's dirty bitmap.
638 * NOTE: caller must be with kml->slots_lock held.
640 * @kml: the KVM memory listener object
641 * @section: the memory section to sync the dirty bitmap with
643 static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener
*kml
,
644 MemoryRegionSection
*section
)
646 KVMState
*s
= kvm_state
;
647 struct kvm_dirty_log d
= {};
649 hwaddr start_addr
, size
;
650 hwaddr slot_size
, slot_offset
= 0;
653 size
= kvm_align_section(section
, &start_addr
);
655 MemoryRegionSection subsection
= *section
;
657 slot_size
= MIN(kvm_max_slot_size
, size
);
658 mem
= kvm_lookup_matching_slot(kml
, start_addr
, slot_size
);
660 /* We don't have a slot if we want to trap every access. */
664 if (!mem
->dirty_bmap
) {
665 /* Allocate on the first log_sync, once and for all */
666 kvm_memslot_init_dirty_bitmap(mem
);
669 d
.dirty_bitmap
= mem
->dirty_bmap
;
670 d
.slot
= mem
->slot
| (kml
->as_id
<< 16);
671 if (kvm_vm_ioctl(s
, KVM_GET_DIRTY_LOG
, &d
) == -1) {
672 DPRINTF("ioctl failed %d\n", errno
);
677 subsection
.offset_within_region
+= slot_offset
;
678 subsection
.size
= int128_make64(slot_size
);
679 kvm_get_dirty_pages_log_range(&subsection
, d
.dirty_bitmap
);
681 slot_offset
+= slot_size
;
682 start_addr
+= slot_size
;
689 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
690 #define KVM_CLEAR_LOG_SHIFT 6
691 #define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
692 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
694 static int kvm_log_clear_one_slot(KVMSlot
*mem
, int as_id
, uint64_t start
,
697 KVMState
*s
= kvm_state
;
698 uint64_t end
, bmap_start
, start_delta
, bmap_npages
;
699 struct kvm_clear_dirty_log d
;
700 unsigned long *bmap_clear
= NULL
, psize
= qemu_real_host_page_size
;
704 * We need to extend either the start or the size or both to
705 * satisfy the KVM interface requirement. Firstly, do the start
706 * page alignment on 64 host pages
708 bmap_start
= start
& KVM_CLEAR_LOG_MASK
;
709 start_delta
= start
- bmap_start
;
713 * The kernel interface has restriction on the size too, that either:
715 * (1) the size is 64 host pages aligned (just like the start), or
716 * (2) the size fills up until the end of the KVM memslot.
718 bmap_npages
= DIV_ROUND_UP(size
+ start_delta
, KVM_CLEAR_LOG_ALIGN
)
719 << KVM_CLEAR_LOG_SHIFT
;
720 end
= mem
->memory_size
/ psize
;
721 if (bmap_npages
> end
- bmap_start
) {
722 bmap_npages
= end
- bmap_start
;
724 start_delta
/= psize
;
727 * Prepare the bitmap to clear dirty bits. Here we must guarantee
728 * that we won't clear any unknown dirty bits otherwise we might
729 * accidentally clear some set bits which are not yet synced from
730 * the kernel into QEMU's bitmap, then we'll lose track of the
731 * guest modifications upon those pages (which can directly lead
732 * to guest data loss or panic after migration).
734 * Layout of the KVMSlot.dirty_bmap:
736 * |<-------- bmap_npages -----------..>|
739 * |----------------|-------------|------------------|------------|
742 * start bmap_start (start) end
743 * of memslot of memslot
745 * [1] bmap_npages can be aligned to either 64 pages or the end of slot
748 assert(bmap_start
% BITS_PER_LONG
== 0);
749 /* We should never do log_clear before log_sync */
750 assert(mem
->dirty_bmap
);
751 if (start_delta
|| bmap_npages
- size
/ psize
) {
752 /* Slow path - we need to manipulate a temp bitmap */
753 bmap_clear
= bitmap_new(bmap_npages
);
754 bitmap_copy_with_src_offset(bmap_clear
, mem
->dirty_bmap
,
755 bmap_start
, start_delta
+ size
/ psize
);
757 * We need to fill the holes at start because that was not
758 * specified by the caller and we extended the bitmap only for
761 bitmap_clear(bmap_clear
, 0, start_delta
);
762 d
.dirty_bitmap
= bmap_clear
;
765 * Fast path - both start and size align well with BITS_PER_LONG
766 * (or the end of memory slot)
768 d
.dirty_bitmap
= mem
->dirty_bmap
+ BIT_WORD(bmap_start
);
771 d
.first_page
= bmap_start
;
772 /* It should never overflow. If it happens, say something */
773 assert(bmap_npages
<= UINT32_MAX
);
774 d
.num_pages
= bmap_npages
;
775 d
.slot
= mem
->slot
| (as_id
<< 16);
777 if (kvm_vm_ioctl(s
, KVM_CLEAR_DIRTY_LOG
, &d
) == -1) {
779 error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
780 "start=0x%"PRIx64
", size=0x%"PRIx32
", errno=%d",
781 __func__
, d
.slot
, (uint64_t)d
.first_page
,
782 (uint32_t)d
.num_pages
, ret
);
785 trace_kvm_clear_dirty_log(d
.slot
, d
.first_page
, d
.num_pages
);
789 * After we have updated the remote dirty bitmap, we update the
790 * cached bitmap as well for the memslot, then if another user
791 * clears the same region we know we shouldn't clear it again on
792 * the remote otherwise it's data loss as well.
794 bitmap_clear(mem
->dirty_bmap
, bmap_start
+ start_delta
,
796 /* This handles the NULL case well */
803 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
805 * NOTE: this will be a no-op if we haven't enabled manual dirty log
806 * protection in the host kernel because in that case this operation
807 * will be done within log_sync().
809 * @kml: the kvm memory listener
810 * @section: the memory range to clear dirty bitmap
812 static int kvm_physical_log_clear(KVMMemoryListener
*kml
,
813 MemoryRegionSection
*section
)
815 KVMState
*s
= kvm_state
;
816 uint64_t start
, size
, offset
, count
;
820 if (!s
->manual_dirty_log_protect
) {
821 /* No need to do explicit clear */
825 start
= section
->offset_within_address_space
;
826 size
= int128_get64(section
->size
);
829 /* Nothing more we can do... */
835 for (i
= 0; i
< s
->nr_slots
; i
++) {
836 mem
= &kml
->slots
[i
];
837 /* Discard slots that are empty or do not overlap the section */
838 if (!mem
->memory_size
||
839 mem
->start_addr
> start
+ size
- 1 ||
840 start
> mem
->start_addr
+ mem
->memory_size
- 1) {
844 if (start
>= mem
->start_addr
) {
845 /* The slot starts before section or is aligned to it. */
846 offset
= start
- mem
->start_addr
;
847 count
= MIN(mem
->memory_size
- offset
, size
);
849 /* The slot starts after section. */
851 count
= MIN(mem
->memory_size
, size
- (mem
->start_addr
- start
));
853 ret
= kvm_log_clear_one_slot(mem
, kml
->as_id
, offset
, count
);
859 kvm_slots_unlock(kml
);
864 static void kvm_coalesce_mmio_region(MemoryListener
*listener
,
865 MemoryRegionSection
*secion
,
866 hwaddr start
, hwaddr size
)
868 KVMState
*s
= kvm_state
;
870 if (s
->coalesced_mmio
) {
871 struct kvm_coalesced_mmio_zone zone
;
877 (void)kvm_vm_ioctl(s
, KVM_REGISTER_COALESCED_MMIO
, &zone
);
881 static void kvm_uncoalesce_mmio_region(MemoryListener
*listener
,
882 MemoryRegionSection
*secion
,
883 hwaddr start
, hwaddr size
)
885 KVMState
*s
= kvm_state
;
887 if (s
->coalesced_mmio
) {
888 struct kvm_coalesced_mmio_zone zone
;
894 (void)kvm_vm_ioctl(s
, KVM_UNREGISTER_COALESCED_MMIO
, &zone
);
898 static void kvm_coalesce_pio_add(MemoryListener
*listener
,
899 MemoryRegionSection
*section
,
900 hwaddr start
, hwaddr size
)
902 KVMState
*s
= kvm_state
;
904 if (s
->coalesced_pio
) {
905 struct kvm_coalesced_mmio_zone zone
;
911 (void)kvm_vm_ioctl(s
, KVM_REGISTER_COALESCED_MMIO
, &zone
);
915 static void kvm_coalesce_pio_del(MemoryListener
*listener
,
916 MemoryRegionSection
*section
,
917 hwaddr start
, hwaddr size
)
919 KVMState
*s
= kvm_state
;
921 if (s
->coalesced_pio
) {
922 struct kvm_coalesced_mmio_zone zone
;
928 (void)kvm_vm_ioctl(s
, KVM_UNREGISTER_COALESCED_MMIO
, &zone
);
932 static MemoryListener kvm_coalesced_pio_listener
= {
933 .coalesced_io_add
= kvm_coalesce_pio_add
,
934 .coalesced_io_del
= kvm_coalesce_pio_del
,
937 int kvm_check_extension(KVMState
*s
, unsigned int extension
)
941 ret
= kvm_ioctl(s
, KVM_CHECK_EXTENSION
, extension
);
949 int kvm_vm_check_extension(KVMState
*s
, unsigned int extension
)
953 ret
= kvm_vm_ioctl(s
, KVM_CHECK_EXTENSION
, extension
);
955 /* VM wide version not implemented, use global one instead */
956 ret
= kvm_check_extension(s
, extension
);
962 typedef struct HWPoisonPage
{
964 QLIST_ENTRY(HWPoisonPage
) list
;
967 static QLIST_HEAD(, HWPoisonPage
) hwpoison_page_list
=
968 QLIST_HEAD_INITIALIZER(hwpoison_page_list
);
970 static void kvm_unpoison_all(void *param
)
972 HWPoisonPage
*page
, *next_page
;
974 QLIST_FOREACH_SAFE(page
, &hwpoison_page_list
, list
, next_page
) {
975 QLIST_REMOVE(page
, list
);
976 qemu_ram_remap(page
->ram_addr
, TARGET_PAGE_SIZE
);
981 void kvm_hwpoison_page_add(ram_addr_t ram_addr
)
985 QLIST_FOREACH(page
, &hwpoison_page_list
, list
) {
986 if (page
->ram_addr
== ram_addr
) {
990 page
= g_new(HWPoisonPage
, 1);
991 page
->ram_addr
= ram_addr
;
992 QLIST_INSERT_HEAD(&hwpoison_page_list
, page
, list
);
995 static uint32_t adjust_ioeventfd_endianness(uint32_t val
, uint32_t size
)
997 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
998 /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
999 * endianness, but the memory core hands them in target endianness.
1000 * For example, PPC is always treated as big-endian even if running
1001 * on KVM and on PPC64LE. Correct here.
1015 static int kvm_set_ioeventfd_mmio(int fd
, hwaddr addr
, uint32_t val
,
1016 bool assign
, uint32_t size
, bool datamatch
)
1019 struct kvm_ioeventfd iofd
= {
1020 .datamatch
= datamatch
? adjust_ioeventfd_endianness(val
, size
) : 0,
1027 trace_kvm_set_ioeventfd_mmio(fd
, (uint64_t)addr
, val
, assign
, size
,
1029 if (!kvm_enabled()) {
1034 iofd
.flags
|= KVM_IOEVENTFD_FLAG_DATAMATCH
;
1037 iofd
.flags
|= KVM_IOEVENTFD_FLAG_DEASSIGN
;
1040 ret
= kvm_vm_ioctl(kvm_state
, KVM_IOEVENTFD
, &iofd
);
1049 static int kvm_set_ioeventfd_pio(int fd
, uint16_t addr
, uint16_t val
,
1050 bool assign
, uint32_t size
, bool datamatch
)
1052 struct kvm_ioeventfd kick
= {
1053 .datamatch
= datamatch
? adjust_ioeventfd_endianness(val
, size
) : 0,
1055 .flags
= KVM_IOEVENTFD_FLAG_PIO
,
1060 trace_kvm_set_ioeventfd_pio(fd
, addr
, val
, assign
, size
, datamatch
);
1061 if (!kvm_enabled()) {
1065 kick
.flags
|= KVM_IOEVENTFD_FLAG_DATAMATCH
;
1068 kick
.flags
|= KVM_IOEVENTFD_FLAG_DEASSIGN
;
1070 r
= kvm_vm_ioctl(kvm_state
, KVM_IOEVENTFD
, &kick
);
1078 static int kvm_check_many_ioeventfds(void)
1080 /* Userspace can use ioeventfd for io notification. This requires a host
1081 * that supports eventfd(2) and an I/O thread; since eventfd does not
1082 * support SIGIO it cannot interrupt the vcpu.
1084 * Older kernels have a 6 device limit on the KVM io bus. Find out so we
1085 * can avoid creating too many ioeventfds.
1087 #if defined(CONFIG_EVENTFD)
1090 for (i
= 0; i
< ARRAY_SIZE(ioeventfds
); i
++) {
1091 ioeventfds
[i
] = eventfd(0, EFD_CLOEXEC
);
1092 if (ioeventfds
[i
] < 0) {
1095 ret
= kvm_set_ioeventfd_pio(ioeventfds
[i
], 0, i
, true, 2, true);
1097 close(ioeventfds
[i
]);
1102 /* Decide whether many devices are supported or not */
1103 ret
= i
== ARRAY_SIZE(ioeventfds
);
1106 kvm_set_ioeventfd_pio(ioeventfds
[i
], 0, i
, false, 2, true);
1107 close(ioeventfds
[i
]);
1115 static const KVMCapabilityInfo
*
1116 kvm_check_extension_list(KVMState
*s
, const KVMCapabilityInfo
*list
)
1118 while (list
->name
) {
1119 if (!kvm_check_extension(s
, list
->value
)) {
1127 void kvm_set_max_memslot_size(hwaddr max_slot_size
)
1130 ROUND_UP(max_slot_size
, qemu_real_host_page_size
) == max_slot_size
1132 kvm_max_slot_size
= max_slot_size
;
1135 static void kvm_set_phys_mem(KVMMemoryListener
*kml
,
1136 MemoryRegionSection
*section
, bool add
)
1140 MemoryRegion
*mr
= section
->mr
;
1141 bool writeable
= !mr
->readonly
&& !mr
->rom_device
;
1142 hwaddr start_addr
, size
, slot_size
;
1145 if (!memory_region_is_ram(mr
)) {
1146 if (writeable
|| !kvm_readonly_mem_allowed
) {
1148 } else if (!mr
->romd_mode
) {
1149 /* If the memory device is not in romd_mode, then we actually want
1150 * to remove the kvm memory slot so all accesses will trap. */
1155 size
= kvm_align_section(section
, &start_addr
);
1160 /* use aligned delta to align the ram address */
1161 ram
= memory_region_get_ram_ptr(mr
) + section
->offset_within_region
+
1162 (start_addr
- section
->offset_within_address_space
);
1164 kvm_slots_lock(kml
);
1168 slot_size
= MIN(kvm_max_slot_size
, size
);
1169 mem
= kvm_lookup_matching_slot(kml
, start_addr
, slot_size
);
1173 if (mem
->flags
& KVM_MEM_LOG_DIRTY_PAGES
) {
1174 kvm_physical_sync_dirty_bitmap(kml
, section
);
1177 /* unregister the slot */
1178 g_free(mem
->dirty_bmap
);
1179 mem
->dirty_bmap
= NULL
;
1180 mem
->memory_size
= 0;
1182 err
= kvm_set_user_memory_region(kml
, mem
, false);
1184 fprintf(stderr
, "%s: error unregistering slot: %s\n",
1185 __func__
, strerror(-err
));
1188 start_addr
+= slot_size
;
1194 /* register the new slot */
1196 slot_size
= MIN(kvm_max_slot_size
, size
);
1197 mem
= kvm_alloc_slot(kml
);
1198 mem
->memory_size
= slot_size
;
1199 mem
->start_addr
= start_addr
;
1201 mem
->flags
= kvm_mem_flags(mr
);
1203 if (mem
->flags
& KVM_MEM_LOG_DIRTY_PAGES
) {
1205 * Reallocate the bmap; it means it doesn't disappear in
1206 * middle of a migrate.
1208 kvm_memslot_init_dirty_bitmap(mem
);
1210 err
= kvm_set_user_memory_region(kml
, mem
, true);
1212 fprintf(stderr
, "%s: error registering slot: %s\n", __func__
,
1216 start_addr
+= slot_size
;
1222 kvm_slots_unlock(kml
);
1225 static void kvm_region_add(MemoryListener
*listener
,
1226 MemoryRegionSection
*section
)
1228 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
1230 memory_region_ref(section
->mr
);
1231 kvm_set_phys_mem(kml
, section
, true);
1234 static void kvm_region_del(MemoryListener
*listener
,
1235 MemoryRegionSection
*section
)
1237 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
1239 kvm_set_phys_mem(kml
, section
, false);
1240 memory_region_unref(section
->mr
);
1243 static void kvm_log_sync(MemoryListener
*listener
,
1244 MemoryRegionSection
*section
)
1246 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
1249 kvm_slots_lock(kml
);
1250 r
= kvm_physical_sync_dirty_bitmap(kml
, section
);
1251 kvm_slots_unlock(kml
);
1257 static void kvm_log_clear(MemoryListener
*listener
,
1258 MemoryRegionSection
*section
)
1260 KVMMemoryListener
*kml
= container_of(listener
, KVMMemoryListener
, listener
);
1263 r
= kvm_physical_log_clear(kml
, section
);
1265 error_report_once("%s: kvm log clear failed: mr=%s "
1266 "offset=%"HWADDR_PRIx
" size=%"PRIx64
, __func__
,
1267 section
->mr
->name
, section
->offset_within_region
,
1268 int128_get64(section
->size
));
1273 static void kvm_mem_ioeventfd_add(MemoryListener
*listener
,
1274 MemoryRegionSection
*section
,
1275 bool match_data
, uint64_t data
,
1278 int fd
= event_notifier_get_fd(e
);
1281 r
= kvm_set_ioeventfd_mmio(fd
, section
->offset_within_address_space
,
1282 data
, true, int128_get64(section
->size
),
1285 fprintf(stderr
, "%s: error adding ioeventfd: %s (%d)\n",
1286 __func__
, strerror(-r
), -r
);
1291 static void kvm_mem_ioeventfd_del(MemoryListener
*listener
,
1292 MemoryRegionSection
*section
,
1293 bool match_data
, uint64_t data
,
1296 int fd
= event_notifier_get_fd(e
);
1299 r
= kvm_set_ioeventfd_mmio(fd
, section
->offset_within_address_space
,
1300 data
, false, int128_get64(section
->size
),
1303 fprintf(stderr
, "%s: error deleting ioeventfd: %s (%d)\n",
1304 __func__
, strerror(-r
), -r
);
1309 static void kvm_io_ioeventfd_add(MemoryListener
*listener
,
1310 MemoryRegionSection
*section
,
1311 bool match_data
, uint64_t data
,
1314 int fd
= event_notifier_get_fd(e
);
1317 r
= kvm_set_ioeventfd_pio(fd
, section
->offset_within_address_space
,
1318 data
, true, int128_get64(section
->size
),
1321 fprintf(stderr
, "%s: error adding ioeventfd: %s (%d)\n",
1322 __func__
, strerror(-r
), -r
);
1327 static void kvm_io_ioeventfd_del(MemoryListener
*listener
,
1328 MemoryRegionSection
*section
,
1329 bool match_data
, uint64_t data
,
1333 int fd
= event_notifier_get_fd(e
);
1336 r
= kvm_set_ioeventfd_pio(fd
, section
->offset_within_address_space
,
1337 data
, false, int128_get64(section
->size
),
1340 fprintf(stderr
, "%s: error deleting ioeventfd: %s (%d)\n",
1341 __func__
, strerror(-r
), -r
);
1346 void kvm_memory_listener_register(KVMState
*s
, KVMMemoryListener
*kml
,
1347 AddressSpace
*as
, int as_id
)
1351 qemu_mutex_init(&kml
->slots_lock
);
1352 kml
->slots
= g_malloc0(s
->nr_slots
* sizeof(KVMSlot
));
1355 for (i
= 0; i
< s
->nr_slots
; i
++) {
1356 kml
->slots
[i
].slot
= i
;
1359 kml
->listener
.region_add
= kvm_region_add
;
1360 kml
->listener
.region_del
= kvm_region_del
;
1361 kml
->listener
.log_start
= kvm_log_start
;
1362 kml
->listener
.log_stop
= kvm_log_stop
;
1363 kml
->listener
.log_sync
= kvm_log_sync
;
1364 kml
->listener
.log_clear
= kvm_log_clear
;
1365 kml
->listener
.priority
= 10;
1367 memory_listener_register(&kml
->listener
, as
);
1369 for (i
= 0; i
< s
->nr_as
; ++i
) {
1378 static MemoryListener kvm_io_listener
= {
1379 .eventfd_add
= kvm_io_ioeventfd_add
,
1380 .eventfd_del
= kvm_io_ioeventfd_del
,
1384 int kvm_set_irq(KVMState
*s
, int irq
, int level
)
1386 struct kvm_irq_level event
;
1389 assert(kvm_async_interrupts_enabled());
1391 event
.level
= level
;
1393 ret
= kvm_vm_ioctl(s
, s
->irq_set_ioctl
, &event
);
1395 perror("kvm_set_irq");
1399 return (s
->irq_set_ioctl
== KVM_IRQ_LINE
) ? 1 : event
.status
;
1402 #ifdef KVM_CAP_IRQ_ROUTING
1403 typedef struct KVMMSIRoute
{
1404 struct kvm_irq_routing_entry kroute
;
1405 QTAILQ_ENTRY(KVMMSIRoute
) entry
;
1408 static void set_gsi(KVMState
*s
, unsigned int gsi
)
1410 set_bit(gsi
, s
->used_gsi_bitmap
);
1413 static void clear_gsi(KVMState
*s
, unsigned int gsi
)
1415 clear_bit(gsi
, s
->used_gsi_bitmap
);
1418 void kvm_init_irq_routing(KVMState
*s
)
1422 gsi_count
= kvm_check_extension(s
, KVM_CAP_IRQ_ROUTING
) - 1;
1423 if (gsi_count
> 0) {
1424 /* Round up so we can search ints using ffs */
1425 s
->used_gsi_bitmap
= bitmap_new(gsi_count
);
1426 s
->gsi_count
= gsi_count
;
1429 s
->irq_routes
= g_malloc0(sizeof(*s
->irq_routes
));
1430 s
->nr_allocated_irq_routes
= 0;
1432 if (!kvm_direct_msi_allowed
) {
1433 for (i
= 0; i
< KVM_MSI_HASHTAB_SIZE
; i
++) {
1434 QTAILQ_INIT(&s
->msi_hashtab
[i
]);
1438 kvm_arch_init_irq_routing(s
);
1441 void kvm_irqchip_commit_routes(KVMState
*s
)
1445 if (kvm_gsi_direct_mapping()) {
1449 if (!kvm_gsi_routing_enabled()) {
1453 s
->irq_routes
->flags
= 0;
1454 trace_kvm_irqchip_commit_routes();
1455 ret
= kvm_vm_ioctl(s
, KVM_SET_GSI_ROUTING
, s
->irq_routes
);
1459 static void kvm_add_routing_entry(KVMState
*s
,
1460 struct kvm_irq_routing_entry
*entry
)
1462 struct kvm_irq_routing_entry
*new;
1465 if (s
->irq_routes
->nr
== s
->nr_allocated_irq_routes
) {
1466 n
= s
->nr_allocated_irq_routes
* 2;
1470 size
= sizeof(struct kvm_irq_routing
);
1471 size
+= n
* sizeof(*new);
1472 s
->irq_routes
= g_realloc(s
->irq_routes
, size
);
1473 s
->nr_allocated_irq_routes
= n
;
1475 n
= s
->irq_routes
->nr
++;
1476 new = &s
->irq_routes
->entries
[n
];
1480 set_gsi(s
, entry
->gsi
);
1483 static int kvm_update_routing_entry(KVMState
*s
,
1484 struct kvm_irq_routing_entry
*new_entry
)
1486 struct kvm_irq_routing_entry
*entry
;
1489 for (n
= 0; n
< s
->irq_routes
->nr
; n
++) {
1490 entry
= &s
->irq_routes
->entries
[n
];
1491 if (entry
->gsi
!= new_entry
->gsi
) {
1495 if(!memcmp(entry
, new_entry
, sizeof *entry
)) {
1499 *entry
= *new_entry
;
1507 void kvm_irqchip_add_irq_route(KVMState
*s
, int irq
, int irqchip
, int pin
)
1509 struct kvm_irq_routing_entry e
= {};
1511 assert(pin
< s
->gsi_count
);
1514 e
.type
= KVM_IRQ_ROUTING_IRQCHIP
;
1516 e
.u
.irqchip
.irqchip
= irqchip
;
1517 e
.u
.irqchip
.pin
= pin
;
1518 kvm_add_routing_entry(s
, &e
);
1521 void kvm_irqchip_release_virq(KVMState
*s
, int virq
)
1523 struct kvm_irq_routing_entry
*e
;
1526 if (kvm_gsi_direct_mapping()) {
1530 for (i
= 0; i
< s
->irq_routes
->nr
; i
++) {
1531 e
= &s
->irq_routes
->entries
[i
];
1532 if (e
->gsi
== virq
) {
1533 s
->irq_routes
->nr
--;
1534 *e
= s
->irq_routes
->entries
[s
->irq_routes
->nr
];
1538 kvm_arch_release_virq_post(virq
);
1539 trace_kvm_irqchip_release_virq(virq
);
1542 void kvm_irqchip_add_change_notifier(Notifier
*n
)
1544 notifier_list_add(&kvm_irqchip_change_notifiers
, n
);
1547 void kvm_irqchip_remove_change_notifier(Notifier
*n
)
1552 void kvm_irqchip_change_notify(void)
1554 notifier_list_notify(&kvm_irqchip_change_notifiers
, NULL
);
1557 static unsigned int kvm_hash_msi(uint32_t data
)
1559 /* This is optimized for IA32 MSI layout. However, no other arch shall
1560 * repeat the mistake of not providing a direct MSI injection API. */
1564 static void kvm_flush_dynamic_msi_routes(KVMState
*s
)
1566 KVMMSIRoute
*route
, *next
;
1569 for (hash
= 0; hash
< KVM_MSI_HASHTAB_SIZE
; hash
++) {
1570 QTAILQ_FOREACH_SAFE(route
, &s
->msi_hashtab
[hash
], entry
, next
) {
1571 kvm_irqchip_release_virq(s
, route
->kroute
.gsi
);
1572 QTAILQ_REMOVE(&s
->msi_hashtab
[hash
], route
, entry
);
1578 static int kvm_irqchip_get_virq(KVMState
*s
)
1583 * PIC and IOAPIC share the first 16 GSI numbers, thus the available
1584 * GSI numbers are more than the number of IRQ route. Allocating a GSI
1585 * number can succeed even though a new route entry cannot be added.
1586 * When this happens, flush dynamic MSI entries to free IRQ route entries.
1588 if (!kvm_direct_msi_allowed
&& s
->irq_routes
->nr
== s
->gsi_count
) {
1589 kvm_flush_dynamic_msi_routes(s
);
1592 /* Return the lowest unused GSI in the bitmap */
1593 next_virq
= find_first_zero_bit(s
->used_gsi_bitmap
, s
->gsi_count
);
1594 if (next_virq
>= s
->gsi_count
) {
1601 static KVMMSIRoute
*kvm_lookup_msi_route(KVMState
*s
, MSIMessage msg
)
1603 unsigned int hash
= kvm_hash_msi(msg
.data
);
1606 QTAILQ_FOREACH(route
, &s
->msi_hashtab
[hash
], entry
) {
1607 if (route
->kroute
.u
.msi
.address_lo
== (uint32_t)msg
.address
&&
1608 route
->kroute
.u
.msi
.address_hi
== (msg
.address
>> 32) &&
1609 route
->kroute
.u
.msi
.data
== le32_to_cpu(msg
.data
)) {
1616 int kvm_irqchip_send_msi(KVMState
*s
, MSIMessage msg
)
1621 if (kvm_direct_msi_allowed
) {
1622 msi
.address_lo
= (uint32_t)msg
.address
;
1623 msi
.address_hi
= msg
.address
>> 32;
1624 msi
.data
= le32_to_cpu(msg
.data
);
1626 memset(msi
.pad
, 0, sizeof(msi
.pad
));
1628 return kvm_vm_ioctl(s
, KVM_SIGNAL_MSI
, &msi
);
1631 route
= kvm_lookup_msi_route(s
, msg
);
1635 virq
= kvm_irqchip_get_virq(s
);
1640 route
= g_malloc0(sizeof(KVMMSIRoute
));
1641 route
->kroute
.gsi
= virq
;
1642 route
->kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1643 route
->kroute
.flags
= 0;
1644 route
->kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1645 route
->kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1646 route
->kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1648 kvm_add_routing_entry(s
, &route
->kroute
);
1649 kvm_irqchip_commit_routes(s
);
1651 QTAILQ_INSERT_TAIL(&s
->msi_hashtab
[kvm_hash_msi(msg
.data
)], route
,
1655 assert(route
->kroute
.type
== KVM_IRQ_ROUTING_MSI
);
1657 return kvm_set_irq(s
, route
->kroute
.gsi
, 1);
1660 int kvm_irqchip_add_msi_route(KVMState
*s
, int vector
, PCIDevice
*dev
)
1662 struct kvm_irq_routing_entry kroute
= {};
1664 MSIMessage msg
= {0, 0};
1666 if (pci_available
&& dev
) {
1667 msg
= pci_get_msi_message(dev
, vector
);
1670 if (kvm_gsi_direct_mapping()) {
1671 return kvm_arch_msi_data_to_gsi(msg
.data
);
1674 if (!kvm_gsi_routing_enabled()) {
1678 virq
= kvm_irqchip_get_virq(s
);
1684 kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1686 kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1687 kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1688 kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1689 if (pci_available
&& kvm_msi_devid_required()) {
1690 kroute
.flags
= KVM_MSI_VALID_DEVID
;
1691 kroute
.u
.msi
.devid
= pci_requester_id(dev
);
1693 if (kvm_arch_fixup_msi_route(&kroute
, msg
.address
, msg
.data
, dev
)) {
1694 kvm_irqchip_release_virq(s
, virq
);
1698 trace_kvm_irqchip_add_msi_route(dev
? dev
->name
: (char *)"N/A",
1701 kvm_add_routing_entry(s
, &kroute
);
1702 kvm_arch_add_msi_route_post(&kroute
, vector
, dev
);
1703 kvm_irqchip_commit_routes(s
);
1708 int kvm_irqchip_update_msi_route(KVMState
*s
, int virq
, MSIMessage msg
,
1711 struct kvm_irq_routing_entry kroute
= {};
1713 if (kvm_gsi_direct_mapping()) {
1717 if (!kvm_irqchip_in_kernel()) {
1722 kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1724 kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1725 kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1726 kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1727 if (pci_available
&& kvm_msi_devid_required()) {
1728 kroute
.flags
= KVM_MSI_VALID_DEVID
;
1729 kroute
.u
.msi
.devid
= pci_requester_id(dev
);
1731 if (kvm_arch_fixup_msi_route(&kroute
, msg
.address
, msg
.data
, dev
)) {
1735 trace_kvm_irqchip_update_msi_route(virq
);
1737 return kvm_update_routing_entry(s
, &kroute
);
1740 static int kvm_irqchip_assign_irqfd(KVMState
*s
, EventNotifier
*event
,
1741 EventNotifier
*resample
, int virq
,
1744 int fd
= event_notifier_get_fd(event
);
1745 int rfd
= resample
? event_notifier_get_fd(resample
) : -1;
1747 struct kvm_irqfd irqfd
= {
1750 .flags
= assign
? 0 : KVM_IRQFD_FLAG_DEASSIGN
,
1755 if (kvm_irqchip_is_split()) {
1757 * When the slow irqchip (e.g. IOAPIC) is in the
1758 * userspace, KVM kernel resamplefd will not work because
1759 * the EOI of the interrupt will be delivered to userspace
1760 * instead, so the KVM kernel resamplefd kick will be
1761 * skipped. The userspace here mimics what the kernel
1762 * provides with resamplefd, remember the resamplefd and
1763 * kick it when we receive EOI of this IRQ.
1765 * This is hackery because IOAPIC is mostly bypassed
1766 * (except EOI broadcasts) when irqfd is used. However
1767 * this can bring much performance back for split irqchip
1768 * with INTx IRQs (for VFIO, this gives 93% perf of the
1769 * full fast path, which is 46% perf boost comparing to
1770 * the INTx slow path).
1772 kvm_resample_fd_insert(virq
, resample
);
1774 irqfd
.flags
|= KVM_IRQFD_FLAG_RESAMPLE
;
1775 irqfd
.resamplefd
= rfd
;
1777 } else if (!assign
) {
1778 if (kvm_irqchip_is_split()) {
1779 kvm_resample_fd_remove(virq
);
1783 if (!kvm_irqfds_enabled()) {
1787 return kvm_vm_ioctl(s
, KVM_IRQFD
, &irqfd
);
1790 int kvm_irqchip_add_adapter_route(KVMState
*s
, AdapterInfo
*adapter
)
1792 struct kvm_irq_routing_entry kroute
= {};
1795 if (!kvm_gsi_routing_enabled()) {
1799 virq
= kvm_irqchip_get_virq(s
);
1805 kroute
.type
= KVM_IRQ_ROUTING_S390_ADAPTER
;
1807 kroute
.u
.adapter
.summary_addr
= adapter
->summary_addr
;
1808 kroute
.u
.adapter
.ind_addr
= adapter
->ind_addr
;
1809 kroute
.u
.adapter
.summary_offset
= adapter
->summary_offset
;
1810 kroute
.u
.adapter
.ind_offset
= adapter
->ind_offset
;
1811 kroute
.u
.adapter
.adapter_id
= adapter
->adapter_id
;
1813 kvm_add_routing_entry(s
, &kroute
);
1818 int kvm_irqchip_add_hv_sint_route(KVMState
*s
, uint32_t vcpu
, uint32_t sint
)
1820 struct kvm_irq_routing_entry kroute
= {};
1823 if (!kvm_gsi_routing_enabled()) {
1826 if (!kvm_check_extension(s
, KVM_CAP_HYPERV_SYNIC
)) {
1829 virq
= kvm_irqchip_get_virq(s
);
1835 kroute
.type
= KVM_IRQ_ROUTING_HV_SINT
;
1837 kroute
.u
.hv_sint
.vcpu
= vcpu
;
1838 kroute
.u
.hv_sint
.sint
= sint
;
1840 kvm_add_routing_entry(s
, &kroute
);
1841 kvm_irqchip_commit_routes(s
);
1846 #else /* !KVM_CAP_IRQ_ROUTING */
1848 void kvm_init_irq_routing(KVMState
*s
)
1852 void kvm_irqchip_release_virq(KVMState
*s
, int virq
)
1856 int kvm_irqchip_send_msi(KVMState
*s
, MSIMessage msg
)
1861 int kvm_irqchip_add_msi_route(KVMState
*s
, int vector
, PCIDevice
*dev
)
1866 int kvm_irqchip_add_adapter_route(KVMState
*s
, AdapterInfo
*adapter
)
1871 int kvm_irqchip_add_hv_sint_route(KVMState
*s
, uint32_t vcpu
, uint32_t sint
)
1876 static int kvm_irqchip_assign_irqfd(KVMState
*s
, EventNotifier
*event
,
1877 EventNotifier
*resample
, int virq
,
1883 int kvm_irqchip_update_msi_route(KVMState
*s
, int virq
, MSIMessage msg
)
1887 #endif /* !KVM_CAP_IRQ_ROUTING */
1889 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState
*s
, EventNotifier
*n
,
1890 EventNotifier
*rn
, int virq
)
1892 return kvm_irqchip_assign_irqfd(s
, n
, rn
, virq
, true);
1895 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState
*s
, EventNotifier
*n
,
1898 return kvm_irqchip_assign_irqfd(s
, n
, NULL
, virq
, false);
1901 int kvm_irqchip_add_irqfd_notifier(KVMState
*s
, EventNotifier
*n
,
1902 EventNotifier
*rn
, qemu_irq irq
)
1905 gboolean found
= g_hash_table_lookup_extended(s
->gsimap
, irq
, &key
, &gsi
);
1910 return kvm_irqchip_add_irqfd_notifier_gsi(s
, n
, rn
, GPOINTER_TO_INT(gsi
));
1913 int kvm_irqchip_remove_irqfd_notifier(KVMState
*s
, EventNotifier
*n
,
1917 gboolean found
= g_hash_table_lookup_extended(s
->gsimap
, irq
, &key
, &gsi
);
1922 return kvm_irqchip_remove_irqfd_notifier_gsi(s
, n
, GPOINTER_TO_INT(gsi
));
1925 void kvm_irqchip_set_qemuirq_gsi(KVMState
*s
, qemu_irq irq
, int gsi
)
1927 g_hash_table_insert(s
->gsimap
, irq
, GINT_TO_POINTER(gsi
));
1930 static void kvm_irqchip_create(KVMState
*s
)
1934 assert(s
->kernel_irqchip_split
!= ON_OFF_AUTO_AUTO
);
1935 if (kvm_check_extension(s
, KVM_CAP_IRQCHIP
)) {
1937 } else if (kvm_check_extension(s
, KVM_CAP_S390_IRQCHIP
)) {
1938 ret
= kvm_vm_enable_cap(s
, KVM_CAP_S390_IRQCHIP
, 0);
1940 fprintf(stderr
, "Enable kernel irqchip failed: %s\n", strerror(-ret
));
1947 /* First probe and see if there's a arch-specific hook to create the
1948 * in-kernel irqchip for us */
1949 ret
= kvm_arch_irqchip_create(s
);
1951 if (s
->kernel_irqchip_split
== ON_OFF_AUTO_ON
) {
1952 perror("Split IRQ chip mode not supported.");
1955 ret
= kvm_vm_ioctl(s
, KVM_CREATE_IRQCHIP
);
1959 fprintf(stderr
, "Create kernel irqchip failed: %s\n", strerror(-ret
));
1963 kvm_kernel_irqchip
= true;
1964 /* If we have an in-kernel IRQ chip then we must have asynchronous
1965 * interrupt delivery (though the reverse is not necessarily true)
1967 kvm_async_interrupts_allowed
= true;
1968 kvm_halt_in_kernel_allowed
= true;
1970 kvm_init_irq_routing(s
);
1972 s
->gsimap
= g_hash_table_new(g_direct_hash
, g_direct_equal
);
1975 /* Find number of supported CPUs using the recommended
1976 * procedure from the kernel API documentation to cope with
1977 * older kernels that may be missing capabilities.
1979 static int kvm_recommended_vcpus(KVMState
*s
)
1981 int ret
= kvm_vm_check_extension(s
, KVM_CAP_NR_VCPUS
);
1982 return (ret
) ? ret
: 4;
1985 static int kvm_max_vcpus(KVMState
*s
)
1987 int ret
= kvm_check_extension(s
, KVM_CAP_MAX_VCPUS
);
1988 return (ret
) ? ret
: kvm_recommended_vcpus(s
);
1991 static int kvm_max_vcpu_id(KVMState
*s
)
1993 int ret
= kvm_check_extension(s
, KVM_CAP_MAX_VCPU_ID
);
1994 return (ret
) ? ret
: kvm_max_vcpus(s
);
1997 bool kvm_vcpu_id_is_valid(int vcpu_id
)
1999 KVMState
*s
= KVM_STATE(current_accel());
2000 return vcpu_id
>= 0 && vcpu_id
< kvm_max_vcpu_id(s
);
2003 static int kvm_init(MachineState
*ms
)
2005 MachineClass
*mc
= MACHINE_GET_CLASS(ms
);
2006 static const char upgrade_note
[] =
2007 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2008 "(see http://sourceforge.net/projects/kvm).\n";
2013 { "SMP", ms
->smp
.cpus
},
2014 { "hotpluggable", ms
->smp
.max_cpus
},
2017 int soft_vcpus_limit
, hard_vcpus_limit
;
2019 const KVMCapabilityInfo
*missing_cap
;
2022 uint64_t dirty_log_manual_caps
;
2024 s
= KVM_STATE(ms
->accelerator
);
2027 * On systems where the kernel can support different base page
2028 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2029 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2030 * page size for the system though.
2032 assert(TARGET_PAGE_SIZE
<= qemu_real_host_page_size
);
2036 #ifdef KVM_CAP_SET_GUEST_DEBUG
2037 QTAILQ_INIT(&s
->kvm_sw_breakpoints
);
2039 QLIST_INIT(&s
->kvm_parked_vcpus
);
2041 s
->fd
= qemu_open_old("/dev/kvm", O_RDWR
);
2043 fprintf(stderr
, "Could not access KVM kernel module: %m\n");
2048 ret
= kvm_ioctl(s
, KVM_GET_API_VERSION
, 0);
2049 if (ret
< KVM_API_VERSION
) {
2053 fprintf(stderr
, "kvm version too old\n");
2057 if (ret
> KVM_API_VERSION
) {
2059 fprintf(stderr
, "kvm version not supported\n");
2063 kvm_immediate_exit
= kvm_check_extension(s
, KVM_CAP_IMMEDIATE_EXIT
);
2064 s
->nr_slots
= kvm_check_extension(s
, KVM_CAP_NR_MEMSLOTS
);
2066 /* If unspecified, use the default value */
2071 s
->nr_as
= kvm_check_extension(s
, KVM_CAP_MULTI_ADDRESS_SPACE
);
2072 if (s
->nr_as
<= 1) {
2075 s
->as
= g_new0(struct KVMAs
, s
->nr_as
);
2077 if (object_property_find(OBJECT(current_machine
), "kvm-type")) {
2078 g_autofree
char *kvm_type
= object_property_get_str(OBJECT(current_machine
),
2081 type
= mc
->kvm_type(ms
, kvm_type
);
2085 ret
= kvm_ioctl(s
, KVM_CREATE_VM
, type
);
2086 } while (ret
== -EINTR
);
2089 fprintf(stderr
, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret
,
2093 if (ret
== -EINVAL
) {
2095 "Host kernel setup problem detected. Please verify:\n");
2096 fprintf(stderr
, "- for kernels supporting the switch_amode or"
2097 " user_mode parameters, whether\n");
2099 " user space is running in primary address space\n");
2101 "- for kernels supporting the vm.allocate_pgste sysctl, "
2102 "whether it is enabled\n");
2110 /* check the vcpu limits */
2111 soft_vcpus_limit
= kvm_recommended_vcpus(s
);
2112 hard_vcpus_limit
= kvm_max_vcpus(s
);
2115 if (nc
->num
> soft_vcpus_limit
) {
2116 warn_report("Number of %s cpus requested (%d) exceeds "
2117 "the recommended cpus supported by KVM (%d)",
2118 nc
->name
, nc
->num
, soft_vcpus_limit
);
2120 if (nc
->num
> hard_vcpus_limit
) {
2121 fprintf(stderr
, "Number of %s cpus requested (%d) exceeds "
2122 "the maximum cpus supported by KVM (%d)\n",
2123 nc
->name
, nc
->num
, hard_vcpus_limit
);
2130 missing_cap
= kvm_check_extension_list(s
, kvm_required_capabilites
);
2133 kvm_check_extension_list(s
, kvm_arch_required_capabilities
);
2137 fprintf(stderr
, "kvm does not support %s\n%s",
2138 missing_cap
->name
, upgrade_note
);
2142 s
->coalesced_mmio
= kvm_check_extension(s
, KVM_CAP_COALESCED_MMIO
);
2143 s
->coalesced_pio
= s
->coalesced_mmio
&&
2144 kvm_check_extension(s
, KVM_CAP_COALESCED_PIO
);
2146 dirty_log_manual_caps
=
2147 kvm_check_extension(s
, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
);
2148 dirty_log_manual_caps
&= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
|
2149 KVM_DIRTY_LOG_INITIALLY_SET
);
2150 s
->manual_dirty_log_protect
= dirty_log_manual_caps
;
2151 if (dirty_log_manual_caps
) {
2152 ret
= kvm_vm_enable_cap(s
, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
, 0,
2153 dirty_log_manual_caps
);
2155 warn_report("Trying to enable capability %"PRIu64
" of "
2156 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2157 "Falling back to the legacy mode. ",
2158 dirty_log_manual_caps
);
2159 s
->manual_dirty_log_protect
= 0;
2163 #ifdef KVM_CAP_VCPU_EVENTS
2164 s
->vcpu_events
= kvm_check_extension(s
, KVM_CAP_VCPU_EVENTS
);
2167 s
->robust_singlestep
=
2168 kvm_check_extension(s
, KVM_CAP_X86_ROBUST_SINGLESTEP
);
2170 #ifdef KVM_CAP_DEBUGREGS
2171 s
->debugregs
= kvm_check_extension(s
, KVM_CAP_DEBUGREGS
);
2174 s
->max_nested_state_len
= kvm_check_extension(s
, KVM_CAP_NESTED_STATE
);
2176 #ifdef KVM_CAP_IRQ_ROUTING
2177 kvm_direct_msi_allowed
= (kvm_check_extension(s
, KVM_CAP_SIGNAL_MSI
) > 0);
2180 s
->intx_set_mask
= kvm_check_extension(s
, KVM_CAP_PCI_2_3
);
2182 s
->irq_set_ioctl
= KVM_IRQ_LINE
;
2183 if (kvm_check_extension(s
, KVM_CAP_IRQ_INJECT_STATUS
)) {
2184 s
->irq_set_ioctl
= KVM_IRQ_LINE_STATUS
;
2187 kvm_readonly_mem_allowed
=
2188 (kvm_check_extension(s
, KVM_CAP_READONLY_MEM
) > 0);
2190 kvm_eventfds_allowed
=
2191 (kvm_check_extension(s
, KVM_CAP_IOEVENTFD
) > 0);
2193 kvm_irqfds_allowed
=
2194 (kvm_check_extension(s
, KVM_CAP_IRQFD
) > 0);
2196 kvm_resamplefds_allowed
=
2197 (kvm_check_extension(s
, KVM_CAP_IRQFD_RESAMPLE
) > 0);
2199 kvm_vm_attributes_allowed
=
2200 (kvm_check_extension(s
, KVM_CAP_VM_ATTRIBUTES
) > 0);
2202 kvm_ioeventfd_any_length_allowed
=
2203 (kvm_check_extension(s
, KVM_CAP_IOEVENTFD_ANY_LENGTH
) > 0);
2208 * if memory encryption object is specified then initialize the memory
2209 * encryption context.
2211 if (ms
->memory_encryption
) {
2212 kvm_state
->memcrypt_handle
= sev_guest_init(ms
->memory_encryption
);
2213 if (!kvm_state
->memcrypt_handle
) {
2218 kvm_state
->memcrypt_encrypt_data
= sev_encrypt_data
;
2221 ret
= kvm_arch_init(ms
, s
);
2226 if (s
->kernel_irqchip_split
== ON_OFF_AUTO_AUTO
) {
2227 s
->kernel_irqchip_split
= mc
->default_kernel_irqchip_split
? ON_OFF_AUTO_ON
: ON_OFF_AUTO_OFF
;
2230 qemu_register_reset(kvm_unpoison_all
, NULL
);
2232 if (s
->kernel_irqchip_allowed
) {
2233 kvm_irqchip_create(s
);
2236 if (kvm_eventfds_allowed
) {
2237 s
->memory_listener
.listener
.eventfd_add
= kvm_mem_ioeventfd_add
;
2238 s
->memory_listener
.listener
.eventfd_del
= kvm_mem_ioeventfd_del
;
2240 s
->memory_listener
.listener
.coalesced_io_add
= kvm_coalesce_mmio_region
;
2241 s
->memory_listener
.listener
.coalesced_io_del
= kvm_uncoalesce_mmio_region
;
2243 kvm_memory_listener_register(s
, &s
->memory_listener
,
2244 &address_space_memory
, 0);
2245 if (kvm_eventfds_allowed
) {
2246 memory_listener_register(&kvm_io_listener
,
2249 memory_listener_register(&kvm_coalesced_pio_listener
,
2252 s
->many_ioeventfds
= kvm_check_many_ioeventfds();
2254 s
->sync_mmu
= !!kvm_vm_check_extension(kvm_state
, KVM_CAP_SYNC_MMU
);
2256 ret
= ram_block_discard_disable(true);
2260 cpus_register_accel(&kvm_cpus
);
2271 g_free(s
->memory_listener
.slots
);
2276 void kvm_set_sigmask_len(KVMState
*s
, unsigned int sigmask_len
)
2278 s
->sigmask_len
= sigmask_len
;
2281 static void kvm_handle_io(uint16_t port
, MemTxAttrs attrs
, void *data
, int direction
,
2282 int size
, uint32_t count
)
2285 uint8_t *ptr
= data
;
2287 for (i
= 0; i
< count
; i
++) {
2288 address_space_rw(&address_space_io
, port
, attrs
,
2290 direction
== KVM_EXIT_IO_OUT
);
2295 static int kvm_handle_internal_error(CPUState
*cpu
, struct kvm_run
*run
)
2297 fprintf(stderr
, "KVM internal error. Suberror: %d\n",
2298 run
->internal
.suberror
);
2300 if (kvm_check_extension(kvm_state
, KVM_CAP_INTERNAL_ERROR_DATA
)) {
2303 for (i
= 0; i
< run
->internal
.ndata
; ++i
) {
2304 fprintf(stderr
, "extra data[%d]: %"PRIx64
"\n",
2305 i
, (uint64_t)run
->internal
.data
[i
]);
2308 if (run
->internal
.suberror
== KVM_INTERNAL_ERROR_EMULATION
) {
2309 fprintf(stderr
, "emulation failure\n");
2310 if (!kvm_arch_stop_on_emulation_error(cpu
)) {
2311 cpu_dump_state(cpu
, stderr
, CPU_DUMP_CODE
);
2312 return EXCP_INTERRUPT
;
2315 /* FIXME: Should trigger a qmp message to let management know
2316 * something went wrong.
2321 void kvm_flush_coalesced_mmio_buffer(void)
2323 KVMState
*s
= kvm_state
;
2325 if (s
->coalesced_flush_in_progress
) {
2329 s
->coalesced_flush_in_progress
= true;
2331 if (s
->coalesced_mmio_ring
) {
2332 struct kvm_coalesced_mmio_ring
*ring
= s
->coalesced_mmio_ring
;
2333 while (ring
->first
!= ring
->last
) {
2334 struct kvm_coalesced_mmio
*ent
;
2336 ent
= &ring
->coalesced_mmio
[ring
->first
];
2338 if (ent
->pio
== 1) {
2339 address_space_write(&address_space_io
, ent
->phys_addr
,
2340 MEMTXATTRS_UNSPECIFIED
, ent
->data
,
2343 cpu_physical_memory_write(ent
->phys_addr
, ent
->data
, ent
->len
);
2346 ring
->first
= (ring
->first
+ 1) % KVM_COALESCED_MMIO_MAX
;
2350 s
->coalesced_flush_in_progress
= false;
2353 static void do_kvm_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
2355 if (!cpu
->vcpu_dirty
) {
2356 kvm_arch_get_registers(cpu
);
2357 cpu
->vcpu_dirty
= true;
2361 void kvm_cpu_synchronize_state(CPUState
*cpu
)
2363 if (!cpu
->vcpu_dirty
) {
2364 run_on_cpu(cpu
, do_kvm_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
2368 static void do_kvm_cpu_synchronize_post_reset(CPUState
*cpu
, run_on_cpu_data arg
)
2370 kvm_arch_put_registers(cpu
, KVM_PUT_RESET_STATE
);
2371 cpu
->vcpu_dirty
= false;
2374 void kvm_cpu_synchronize_post_reset(CPUState
*cpu
)
2376 run_on_cpu(cpu
, do_kvm_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
2379 static void do_kvm_cpu_synchronize_post_init(CPUState
*cpu
, run_on_cpu_data arg
)
2381 kvm_arch_put_registers(cpu
, KVM_PUT_FULL_STATE
);
2382 cpu
->vcpu_dirty
= false;
2385 void kvm_cpu_synchronize_post_init(CPUState
*cpu
)
2387 run_on_cpu(cpu
, do_kvm_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
2390 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState
*cpu
, run_on_cpu_data arg
)
2392 cpu
->vcpu_dirty
= true;
2395 void kvm_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
2397 run_on_cpu(cpu
, do_kvm_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
2400 #ifdef KVM_HAVE_MCE_INJECTION
2401 static __thread
void *pending_sigbus_addr
;
2402 static __thread
int pending_sigbus_code
;
2403 static __thread
bool have_sigbus_pending
;
2406 static void kvm_cpu_kick(CPUState
*cpu
)
2408 qatomic_set(&cpu
->kvm_run
->immediate_exit
, 1);
2411 static void kvm_cpu_kick_self(void)
2413 if (kvm_immediate_exit
) {
2414 kvm_cpu_kick(current_cpu
);
2416 qemu_cpu_kick_self();
2420 static void kvm_eat_signals(CPUState
*cpu
)
2422 struct timespec ts
= { 0, 0 };
2428 if (kvm_immediate_exit
) {
2429 qatomic_set(&cpu
->kvm_run
->immediate_exit
, 0);
2430 /* Write kvm_run->immediate_exit before the cpu->exit_request
2431 * write in kvm_cpu_exec.
2437 sigemptyset(&waitset
);
2438 sigaddset(&waitset
, SIG_IPI
);
2441 r
= sigtimedwait(&waitset
, &siginfo
, &ts
);
2442 if (r
== -1 && !(errno
== EAGAIN
|| errno
== EINTR
)) {
2443 perror("sigtimedwait");
2447 r
= sigpending(&chkset
);
2449 perror("sigpending");
2452 } while (sigismember(&chkset
, SIG_IPI
));
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            qemu_mutex_lock_iothread();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            qemu_mutex_unlock_iothread();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}
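/*
 * A minimal sketch of the per-vCPU thread loop that drives this
 * function (the real loop lives in the cpus layer and also handles
 * pause, halt and unplug; shown here only for orientation):
 *
 *     do {
 *         if (cpu_can_run(cpu)) {
 *             r = kvm_cpu_exec(cpu);
 *             if (r == EXCP_DEBUG) {
 *                 cpu_handle_guest_debug(cpu);
 *             }
 *         }
 *         qemu_wait_io_event(cpu);
 *     } while (!cpu->unplug || cpu_can_run(cpu));
 */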
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
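/*
 * All four wrappers share one convention: a single optional pointer (or
 * integer) argument and a negative errno return on failure, e.g.
 *
 *     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
 *     ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
 *
 * so callers can test "ret < 0" uniformly regardless of which fd the
 * request targets.
 */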
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
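/*
 * Example (hypothetical fd, group and attribute values): reading a
 * 32-bit attribute of an in-kernel device and reporting failure:
 *
 *     uint32_t val;
 *     Error *err = NULL;
 *
 *     if (kvm_device_access(dev_fd, group, attr, &val, false, &err) < 0) {
 *         error_report_err(err);
 *     }
 */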
bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}
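/*
 * These hooks are driven by the gdbstub: a GDB "Z0" packet ends up
 * inserting a software breakpoint and "z0" removing it, roughly
 * (simplified sketch; the real dispatch lives in gdbstub.c):
 *
 *     kvm_insert_breakpoint(cpu, addr, 1, GDB_BREAKPOINT_SW);
 *     ...
 *     kvm_remove_breakpoint(cpu, addr, 1, GDB_BREAKPOINT_SW);
 *
 * Both paths finish with kvm_update_guest_debug() on every vCPU so the
 * in-kernel KVM_SET_GUEST_DEBUG state stays consistent across CPUs.
 */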
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
/* Called asynchronously in VCPU thread. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread. */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
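/*
 * Example (the device type is target specific; the s390 FLIC is used
 * here purely as an illustration):
 *
 *     if (kvm_device_supported(s->vmfd, KVM_DEV_TYPE_FLIC)) {
 *         int dev_fd = kvm_create_device(s, KVM_DEV_TYPE_FLIC, false);
 *         ...
 *     }
 *
 * With test=true the device is only probed, not instantiated, and 0 is
 * returned on success instead of a file descriptor.
 */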
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    int r;
    struct kvm_one_reg reg;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    int r;
    struct kvm_one_reg reg;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
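/*
 * Example (hypothetical register ID; real IDs are the architecture
 * specific KVM_REG_* encodings, which embed the register size):
 *
 *     uint64_t val = 0;
 *
 *     kvm_get_one_reg(cs, id, &val);
 *     val |= 1;
 *     kvm_set_one_reg(cs, id, &val);
 */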
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}
static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}
static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above. If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}
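/*
 * This setter backs the "kernel-irqchip" accelerator property, i.e.
 * command lines such as:
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split
 *
 * "split" requests an in-kernel local APIC while interrupt routing
 * stays in userspace; note that both "on" and "split" make the
 * in-kernel irqchip mandatory (kernel_irqchip_required).
 */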
bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}
static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);