/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "sysemu/accel.h"
#include "hw/pci/msi.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256
typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    AccelState parent_obj;

    KVMSlot *slots;
    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};
#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")

#define KVM_STATE(obj) \
    OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
static KVMSlot *kvm_get_free_slot(KVMState *s)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    return NULL;
}

bool kvm_has_free_slot(MachineState *ms)
{
    return kvm_get_free_slot(KVM_STATE(ms->accelerator));
}

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    KVMSlot *slot = kvm_get_free_slot(s);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
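/* Worked example: with slots [0x0, 0x8000) and [0x10000, 0x18000)
 * registered, a query for [0x7000, 0x11000) overlaps both; the skip test
 * above discards any candidate whose start lies above an already-found
 * slot, so the lookup returns [0x0, 0x8000), the lowest start address. */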
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
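/* Example of the mapping a slot describes: guest physical addresses
 * [start_addr, start_addr + memory_size) are backed by host virtual memory
 * starting at slot->ram, so a guest physical address gpa resolves to
 * slot->ram + (gpa - start_addr) — the inverse of
 * kvm_physical_memory_addr_from_host() above. */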
int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
err:
    return ret;
}
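/* A sketch of the layout the mmap above yields: page 0 of the per-vcpu
 * mapping is the vcpu's struct kvm_run, and when coalesced MMIO is
 * available, KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) returns the page
 * offset of the shared ring — hence the
 * kvm_run + s->coalesced_mmio * PAGE_SIZE computation above. */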
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
{
    int flags;

    flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        return 0;
    }

    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(bool enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < s->nr_slots; i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
    ram_addr_t pages = int128_get64(section->size) / getpagesize();

    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
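/* Worked example: ALIGN(5, 4) = (5 + 3) & ~3 = 8, while ALIGN(8, 4) =
 * 11 & ~3 = 8.  The mask trick only holds when y is a power of two. */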
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d = {};
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + int128_get64(section->size);

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
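/* Worked example of the size computation above: a 1 GiB slot with 4 KiB
 * target pages has (1 GiB >> 12) = 262144 page bits; ALIGN(262144, 64) is
 * already 262144, and dividing by 8 yields a 32 KiB bitmap buffer. */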
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
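/* Example: with a big-endian target on a little-endian host, a 2-byte
 * ioeventfd value 0x1234 arrives from the memory core in target order and
 * must be handed to the kernel as bswap16(0x1234) = 0x3412 so the
 * in-kernel datamatch compares equal to the bytes the guest writes. */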
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;

    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
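/* How the probe above works: it registers up to seven dummy PIO ioeventfds
 * at port 0 with 2-byte datamatch values 0..6.  On older kernels with the
 * 6-device io-bus limit the last registration fails, the loop breaks with
 * i < ARRAY_SIZE(ioeventfds), and the function reports 0. */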
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty =
        memory_region_get_dirty_log_mask(mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    bool writeable = !mr->readonly && !mr->rom_device;
    bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    delta = (TARGET_PAGE_SIZE - (start_addr & ~TARGET_PAGE_MASK));
    delta &= ~TARGET_PAGE_MASK;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if ((mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || s->migration_log) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        abort();
    }
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
    .priority = 10,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};
static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
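/* Note on the return value above: with KVM_IRQ_LINE_STATUS the kernel
 * fills event.status with the delivery status (e.g. whether the interrupt
 * was coalesced); plain KVM_IRQ_LINE reports nothing back, so delivery is
 * assumed and 1 is returned. */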
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}
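/* Worked example: gsi = 35 maps to word 35 / 32 = 1 and bit 35 % 32 = 3,
 * so set_gsi(s, 35) does used_gsi_bitmap[1] |= 0x8 and clear_gsi undoes
 * it.  kvm_irqchip_get_virq() below scans these words for a zero bit. */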
void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, zeroes;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        zeroes = ctz32(~word[i]);
        if (zeroes == 32) {
            continue;
        }

        return zeroes + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}
*kvm_lookup_msi_route(KVMState
*s
, MSIMessage msg
)
1177 unsigned int hash
= kvm_hash_msi(msg
.data
);
1180 QTAILQ_FOREACH(route
, &s
->msi_hashtab
[hash
], entry
) {
1181 if (route
->kroute
.u
.msi
.address_lo
== (uint32_t)msg
.address
&&
1182 route
->kroute
.u
.msi
.address_hi
== (msg
.address
>> 32) &&
1183 route
->kroute
.u
.msi
.data
== le32_to_cpu(msg
.data
)) {
1190 int kvm_irqchip_send_msi(KVMState
*s
, MSIMessage msg
)
1195 if (s
->direct_msi
) {
1196 msi
.address_lo
= (uint32_t)msg
.address
;
1197 msi
.address_hi
= msg
.address
>> 32;
1198 msi
.data
= le32_to_cpu(msg
.data
);
1200 memset(msi
.pad
, 0, sizeof(msi
.pad
));
1202 return kvm_vm_ioctl(s
, KVM_SIGNAL_MSI
, &msi
);
1205 route
= kvm_lookup_msi_route(s
, msg
);
1209 virq
= kvm_irqchip_get_virq(s
);
1214 route
= g_malloc0(sizeof(KVMMSIRoute
));
1215 route
->kroute
.gsi
= virq
;
1216 route
->kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1217 route
->kroute
.flags
= 0;
1218 route
->kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1219 route
->kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1220 route
->kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1222 kvm_add_routing_entry(s
, &route
->kroute
);
1223 kvm_irqchip_commit_routes(s
);
1225 QTAILQ_INSERT_TAIL(&s
->msi_hashtab
[kvm_hash_msi(msg
.data
)], route
,
1229 assert(route
->kroute
.type
== KVM_IRQ_ROUTING_MSI
);
1231 return kvm_set_irq(s
, route
->kroute
.gsi
, 1);
1234 int kvm_irqchip_add_msi_route(KVMState
*s
, MSIMessage msg
)
1236 struct kvm_irq_routing_entry kroute
= {};
1239 if (kvm_gsi_direct_mapping()) {
1240 return kvm_arch_msi_data_to_gsi(msg
.data
);
1243 if (!kvm_gsi_routing_enabled()) {
1247 virq
= kvm_irqchip_get_virq(s
);
1253 kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1255 kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1256 kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1257 kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1258 if (kvm_arch_fixup_msi_route(&kroute
, msg
.address
, msg
.data
)) {
1259 kvm_irqchip_release_virq(s
, virq
);
1263 kvm_add_routing_entry(s
, &kroute
);
1264 kvm_irqchip_commit_routes(s
);
1269 int kvm_irqchip_update_msi_route(KVMState
*s
, int virq
, MSIMessage msg
)
1271 struct kvm_irq_routing_entry kroute
= {};
1273 if (kvm_gsi_direct_mapping()) {
1277 if (!kvm_irqchip_in_kernel()) {
1282 kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1284 kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1285 kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1286 kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1287 if (kvm_arch_fixup_msi_route(&kroute
, msg
.address
, msg
.data
)) {
1291 return kvm_update_routing_entry(s
, &kroute
);
1294 static int kvm_irqchip_assign_irqfd(KVMState
*s
, int fd
, int rfd
, int virq
,
1297 struct kvm_irqfd irqfd
= {
1300 .flags
= assign
? 0 : KVM_IRQFD_FLAG_DEASSIGN
,
1304 irqfd
.flags
|= KVM_IRQFD_FLAG_RESAMPLE
;
1305 irqfd
.resamplefd
= rfd
;
1308 if (!kvm_irqfds_enabled()) {
1312 return kvm_vm_ioctl(s
, KVM_IRQFD
, &irqfd
);
1315 int kvm_irqchip_add_adapter_route(KVMState
*s
, AdapterInfo
*adapter
)
1317 struct kvm_irq_routing_entry kroute
= {};
1320 if (!kvm_gsi_routing_enabled()) {
1324 virq
= kvm_irqchip_get_virq(s
);
1330 kroute
.type
= KVM_IRQ_ROUTING_S390_ADAPTER
;
1332 kroute
.u
.adapter
.summary_addr
= adapter
->summary_addr
;
1333 kroute
.u
.adapter
.ind_addr
= adapter
->ind_addr
;
1334 kroute
.u
.adapter
.summary_offset
= adapter
->summary_offset
;
1335 kroute
.u
.adapter
.ind_offset
= adapter
->ind_offset
;
1336 kroute
.u
.adapter
.adapter_id
= adapter
->adapter_id
;
1338 kvm_add_routing_entry(s
, &kroute
);
1339 kvm_irqchip_commit_routes(s
);
#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
           rn ? event_notifier_get_fd(rn) : -1, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
                                    false);
}
static int kvm_irqchip_create(MachineState *machine, KVMState *s)
{
    int ret;

    if (!machine_kernel_irqchip_allowed(machine) ||
        (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
         (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
        return 0;
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret < 0) {
        return ret;
    } else if (ret == 0) {
        ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        if (ret < 0) {
            fprintf(stderr, "Create kernel irqchip failed\n");
            return ret;
        }
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}
static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP",          smp_cpus },
        { "hotpluggable", max_cpus },
        { NULL, }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i, type = 0;
    const char *kvm_type;

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));

    for (i = 0; i < s->nr_slots; i++) {
        s->slots[i].slot = i;
    }

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            fprintf(stderr,
                    "Warning: Number of %s cpus requested (%d) exceeds "
                    "the recommended cpus supported by KVM (%d)\n",
                    nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
    if (mc->kvm_type) {
        type = mc->kvm_type(kvm_type);
    } else if (kvm_type) {
        ret = -EINVAL;
        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#endif
        goto err;
    }

    s->vmfd = ret;
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    kvm_irqfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(ms, s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->slots);

    return ret;
}
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data,
                          int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}
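/* Example: a guest "rep outsw" to a port produces a single KVM_EXIT_IO
 * with count > 1; the data items sit back to back in the kvm_run mapping
 * at run + run->io.data_offset, and the loop above replays each size-byte
 * item through address_space_rw(). */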
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

static void do_kvm_cpu_synchronize_post_reset(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
}

static void do_kvm_cpu_synchronize_post_init(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
}

void kvm_cpu_clean_state(CPUState *cpu)
{
    cpu->kvm_vcpu_dirty = false;
}
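/* The kvm_vcpu_dirty protocol in the helpers above: when the flag is set,
 * QEMU's copy of the registers is authoritative and must be pushed with
 * kvm_arch_put_registers() before the next KVM_RUN; when clear, the kernel
 * copy is authoritative and must be fetched with kvm_arch_get_registers()
 * before it may be inspected or modified. */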
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        MemTxAttrs attrs;

        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        attrs = kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request();
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request();
                ret = EXCP_INTERRUPT;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *cpu;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);
    data.cpu = cpu;

    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}

int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}
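/* Usage sketch (assuming the caller wants a VFIO device, a type the kernel
 * may or may not support):
 *
 *     if (kvm_create_device(s, KVM_DEV_TYPE_VFIO, true) == 0) {
 *         int dev_fd = kvm_create_device(s, KVM_DEV_TYPE_VFIO, false);
 *         ... issue kvm_device_ioctl(dev_fd, ...) against it ...
 *     }
 *
 * The test pass probes with KVM_CREATE_DEVICE_TEST without creating
 * anything; the second call returns the new device fd. */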
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(r));
    }
    return r;
}

static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->allowed = &kvm_allowed;
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);