accel/kvm: Use negative KVM type for error propagation
accel/kvm/kvm-all.c (qemu/armbru.git)
1 /*
2 * QEMU KVM support
4 * Copyright IBM, Corp. 2008
5 * Red Hat, Inc. 2008
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Glauber Costa <gcosta@redhat.com>
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
18 #include <poll.h>
20 #include <linux/kvm.h>
22 #include "qemu/atomic.h"
23 #include "qemu/option.h"
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
26 #include "qapi/error.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/s390x/adapter.h"
30 #include "exec/gdbstub.h"
31 #include "sysemu/kvm_int.h"
32 #include "sysemu/runstate.h"
33 #include "sysemu/cpus.h"
34 #include "sysemu/accel-blocker.h"
35 #include "qemu/bswap.h"
36 #include "exec/memory.h"
37 #include "exec/ram_addr.h"
38 #include "qemu/event_notifier.h"
39 #include "qemu/main-loop.h"
40 #include "trace.h"
41 #include "hw/irq.h"
42 #include "qapi/visitor.h"
43 #include "qapi/qapi-types-common.h"
44 #include "qapi/qapi-visit-common.h"
45 #include "sysemu/reset.h"
46 #include "qemu/guest-random.h"
47 #include "sysemu/hw_accel.h"
48 #include "kvm-cpus.h"
49 #include "sysemu/dirtylimit.h"
50 #include "qemu/range.h"
52 #include "hw/boards.h"
53 #include "sysemu/stats.h"
55 /* This check must be after config-host.h is included */
56 #ifdef CONFIG_EVENTFD
57 #include <sys/eventfd.h>
58 #endif
60 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
61 * need to use the real host PAGE_SIZE, as that's what KVM will use.
63 #ifdef PAGE_SIZE
64 #undef PAGE_SIZE
65 #endif
66 #define PAGE_SIZE qemu_real_host_page_size()
68 #ifndef KVM_GUESTDBG_BLOCKIRQ
69 #define KVM_GUESTDBG_BLOCKIRQ 0
70 #endif
72 //#define DEBUG_KVM
74 #ifdef DEBUG_KVM
75 #define DPRINTF(fmt, ...) \
76 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
77 #else
78 #define DPRINTF(fmt, ...) \
79 do { } while (0)
80 #endif
82 struct KVMParkedVcpu {
83 unsigned long vcpu_id;
84 int kvm_fd;
85 QLIST_ENTRY(KVMParkedVcpu) node;
88 KVMState *kvm_state;
89 bool kvm_kernel_irqchip;
90 bool kvm_split_irqchip;
91 bool kvm_async_interrupts_allowed;
92 bool kvm_halt_in_kernel_allowed;
93 bool kvm_eventfds_allowed;
94 bool kvm_irqfds_allowed;
95 bool kvm_resamplefds_allowed;
96 bool kvm_msi_via_irqfd_allowed;
97 bool kvm_gsi_routing_allowed;
98 bool kvm_gsi_direct_mapping;
99 bool kvm_allowed;
100 bool kvm_readonly_mem_allowed;
101 bool kvm_vm_attributes_allowed;
102 bool kvm_direct_msi_allowed;
103 bool kvm_ioeventfd_any_length_allowed;
104 bool kvm_msi_use_devid;
105 bool kvm_has_guest_debug;
106 static int kvm_sstep_flags;
107 static bool kvm_immediate_exit;
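/*
 * Upper bound on the size of a single KVM memslot; ~0 means effectively
 * unlimited. Targets can lower it via kvm_set_max_memslot_size()
 * (e.g. s390x does, because its KVM memslots are size-limited).
 */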
108 static hwaddr kvm_max_slot_size = ~0;
110 static const KVMCapabilityInfo kvm_required_capabilites[] = {
111 KVM_CAP_INFO(USER_MEMORY),
112 KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
113 KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
114 KVM_CAP_LAST_INFO
117 static NotifierList kvm_irqchip_change_notifiers =
118 NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
120 struct KVMResampleFd {
121 int gsi;
122 EventNotifier *resample_event;
123 QLIST_ENTRY(KVMResampleFd) node;
125 typedef struct KVMResampleFd KVMResampleFd;
128 * Only used with split irqchip where we need to do the resample fd
129 * kick for the kernel from userspace.
131 static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
132 QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
134 static QemuMutex kml_slots_lock;
136 #define kvm_slots_lock() qemu_mutex_lock(&kml_slots_lock)
137 #define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
139 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
141 static inline void kvm_resample_fd_remove(int gsi)
143 KVMResampleFd *rfd;
145 QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
146 if (rfd->gsi == gsi) {
147 QLIST_REMOVE(rfd, node);
148 g_free(rfd);
149 break;
154 static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
156 KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
158 rfd->gsi = gsi;
159 rfd->resample_event = event;
161 QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
164 void kvm_resample_fd_notify(int gsi)
166 KVMResampleFd *rfd;
168 QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
169 if (rfd->gsi == gsi) {
170 event_notifier_set(rfd->resample_event);
171 trace_kvm_resample_fd_notify(gsi);
172 return;
177 int kvm_get_max_memslots(void)
179 KVMState *s = KVM_STATE(current_accel());
181 return s->nr_slots;
184 /* Called with KVMMemoryListener.slots_lock held */
185 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
187 KVMState *s = kvm_state;
188 int i;
190 for (i = 0; i < s->nr_slots; i++) {
191 if (kml->slots[i].memory_size == 0) {
192 return &kml->slots[i];
196 return NULL;
199 bool kvm_has_free_slot(MachineState *ms)
201 KVMState *s = KVM_STATE(ms->accelerator);
202 bool result;
203 KVMMemoryListener *kml = &s->memory_listener;
205 kvm_slots_lock();
206 result = !!kvm_get_free_slot(kml);
207 kvm_slots_unlock();
209 return result;
212 /* Called with KVMMemoryListener.slots_lock held */
213 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
215 KVMSlot *slot = kvm_get_free_slot(kml);
217 if (slot) {
218 return slot;
221 fprintf(stderr, "%s: no free slot available\n", __func__);
222 abort();
225 static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
226 hwaddr start_addr,
227 hwaddr size)
229 KVMState *s = kvm_state;
230 int i;
232 for (i = 0; i < s->nr_slots; i++) {
233 KVMSlot *mem = &kml->slots[i];
235 if (start_addr == mem->start_addr && size == mem->memory_size) {
236 return mem;
240 return NULL;
244 * Calculate and align the start address and the size of the section.
245 * Return the size. If the size is 0, the aligned section is empty.
247 static hwaddr kvm_align_section(MemoryRegionSection *section,
248 hwaddr *start)
250 hwaddr size = int128_get64(section->size);
251 hwaddr delta, aligned;
253 /* kvm works in page size chunks, but the function may be called
254 with sub-page size and unaligned start address. Pad the start
255 address to next and truncate size to previous page boundary. */
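/*
 * Worked example, assuming a 4 KiB host page size: a section at offset
 * 0x1234 with size 0x3000 is padded to *start = 0x2000 (delta = 0xdcc),
 * and the returned size is (0x3000 - 0xdcc) rounded down to a page
 * boundary, i.e. 0x2000.
 */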
256 aligned = ROUND_UP(section->offset_within_address_space,
257 qemu_real_host_page_size());
258 delta = aligned - section->offset_within_address_space;
259 *start = aligned;
260 if (delta > size) {
261 return 0;
264 return (size - delta) & qemu_real_host_page_mask();
267 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
268 hwaddr *phys_addr)
270 KVMMemoryListener *kml = &s->memory_listener;
271 int i, ret = 0;
273 kvm_slots_lock();
274 for (i = 0; i < s->nr_slots; i++) {
275 KVMSlot *mem = &kml->slots[i];
277 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
278 *phys_addr = mem->start_addr + (ram - mem->ram);
279 ret = 1;
280 break;
283 kvm_slots_unlock();
285 return ret;
288 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
290 KVMState *s = kvm_state;
291 struct kvm_userspace_memory_region mem;
292 int ret;
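/*
 * The slot number goes in the low 16 bits and the address-space id in
 * the high 16 bits; kvm_dirty_ring_mark_page() decodes dirty-ring
 * entries with the same split (cur->slot >> 16 / cur->slot & 0xffff).
 */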
294 mem.slot = slot->slot | (kml->as_id << 16);
295 mem.guest_phys_addr = slot->start_addr;
296 mem.userspace_addr = (unsigned long)slot->ram;
297 mem.flags = slot->flags;
299 if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
300 /* Set the slot size to 0 before setting the slot to the desired
301 * value. This is needed based on KVM commit 75d61fbc. */
302 mem.memory_size = 0;
303 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
304 if (ret < 0) {
305 goto err;
308 mem.memory_size = slot->memory_size;
309 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
310 slot->old_flags = mem.flags;
311 err:
312 trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
313 mem.memory_size, mem.userspace_addr, ret);
314 if (ret < 0) {
315 error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
316 " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
317 __func__, mem.slot, slot->start_addr,
318 (uint64_t)mem.memory_size, strerror(errno));
320 return ret;
323 static int do_kvm_destroy_vcpu(CPUState *cpu)
325 KVMState *s = kvm_state;
326 long mmap_size;
327 struct KVMParkedVcpu *vcpu = NULL;
328 int ret = 0;
330 DPRINTF("kvm_destroy_vcpu\n");
332 ret = kvm_arch_destroy_vcpu(cpu);
333 if (ret < 0) {
334 goto err;
337 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
338 if (mmap_size < 0) {
339 ret = mmap_size;
340 DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
341 goto err;
344 ret = munmap(cpu->kvm_run, mmap_size);
345 if (ret < 0) {
346 goto err;
349 if (cpu->kvm_dirty_gfns) {
350 ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
351 if (ret < 0) {
352 goto err;
356 vcpu = g_malloc0(sizeof(*vcpu));
357 vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
358 vcpu->kvm_fd = cpu->kvm_fd;
359 QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
360 err:
361 return ret;
364 void kvm_destroy_vcpu(CPUState *cpu)
366 if (do_kvm_destroy_vcpu(cpu) < 0) {
367 error_report("kvm_destroy_vcpu failed");
368 exit(EXIT_FAILURE);
372 static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
374 struct KVMParkedVcpu *cpu;
376 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
377 if (cpu->vcpu_id == vcpu_id) {
378 int kvm_fd;
380 QLIST_REMOVE(cpu, node);
381 kvm_fd = cpu->kvm_fd;
382 g_free(cpu);
383 return kvm_fd;
387 return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
390 int kvm_init_vcpu(CPUState *cpu, Error **errp)
392 KVMState *s = kvm_state;
393 long mmap_size;
394 int ret;
396 trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
398 ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
399 if (ret < 0) {
400 error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
401 kvm_arch_vcpu_id(cpu));
402 goto err;
405 cpu->kvm_fd = ret;
406 cpu->kvm_state = s;
407 cpu->vcpu_dirty = true;
408 cpu->dirty_pages = 0;
409 cpu->throttle_us_per_full = 0;
411 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
412 if (mmap_size < 0) {
413 ret = mmap_size;
414 error_setg_errno(errp, -mmap_size,
415 "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
416 goto err;
419 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
420 cpu->kvm_fd, 0);
421 if (cpu->kvm_run == MAP_FAILED) {
422 ret = -errno;
423 error_setg_errno(errp, ret,
424 "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
425 kvm_arch_vcpu_id(cpu));
426 goto err;
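/*
 * s->coalesced_mmio is the page offset of the coalesced MMIO ring within
 * the vcpu mmap area, as reported by
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO). The ring is per-VM, which
 * is why it only needs to be mapped once.
 */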
429 if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
430 s->coalesced_mmio_ring =
431 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
434 if (s->kvm_dirty_ring_size) {
435 /* Use MAP_SHARED to share pages with the kernel */
436 cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
437 PROT_READ | PROT_WRITE, MAP_SHARED,
438 cpu->kvm_fd,
439 PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
440 if (cpu->kvm_dirty_gfns == MAP_FAILED) {
441 ret = -errno;
442 DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
443 goto err;
447 ret = kvm_arch_init_vcpu(cpu);
448 if (ret < 0) {
449 error_setg_errno(errp, -ret,
450 "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
451 kvm_arch_vcpu_id(cpu));
453 cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
455 err:
456 return ret;
460 * dirty pages logging control
463 static int kvm_mem_flags(MemoryRegion *mr)
465 bool readonly = mr->readonly || memory_region_is_romd(mr);
466 int flags = 0;
468 if (memory_region_get_dirty_log_mask(mr) != 0) {
469 flags |= KVM_MEM_LOG_DIRTY_PAGES;
471 if (readonly && kvm_readonly_mem_allowed) {
472 flags |= KVM_MEM_READONLY;
474 return flags;
477 /* Called with KVMMemoryListener.slots_lock held */
478 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
479 MemoryRegion *mr)
481 mem->flags = kvm_mem_flags(mr);
483 /* If nothing changed effectively, no need to issue ioctl */
484 if (mem->flags == mem->old_flags) {
485 return 0;
488 kvm_slot_init_dirty_bitmap(mem);
489 return kvm_set_user_memory_region(kml, mem, false);
492 static int kvm_section_update_flags(KVMMemoryListener *kml,
493 MemoryRegionSection *section)
495 hwaddr start_addr, size, slot_size;
496 KVMSlot *mem;
497 int ret = 0;
499 size = kvm_align_section(section, &start_addr);
500 if (!size) {
501 return 0;
504 kvm_slots_lock();
506 while (size && !ret) {
507 slot_size = MIN(kvm_max_slot_size, size);
508 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
509 if (!mem) {
510 /* We don't have a slot if we want to trap every access. */
511 goto out;
514 ret = kvm_slot_update_flags(kml, mem, section->mr);
515 start_addr += slot_size;
516 size -= slot_size;
519 out:
520 kvm_slots_unlock();
521 return ret;
524 static void kvm_log_start(MemoryListener *listener,
525 MemoryRegionSection *section,
526 int old, int new)
528 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
529 int r;
531 if (old != 0) {
532 return;
535 r = kvm_section_update_flags(kml, section);
536 if (r < 0) {
537 abort();
541 static void kvm_log_stop(MemoryListener *listener,
542 MemoryRegionSection *section,
543 int old, int new)
545 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
546 int r;
548 if (new != 0) {
549 return;
552 r = kvm_section_update_flags(kml, section);
553 if (r < 0) {
554 abort();
558 /* get kvm's dirty pages bitmap and update qemu's */
559 static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
561 ram_addr_t start = slot->ram_start_offset;
562 ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
564 cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
567 static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
569 memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
572 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
574 /* Allocate the dirty bitmap for a slot */
575 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
577 if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
578 return;
582 * XXX bad kernel interface alert
583 * For dirty bitmap, kernel allocates array of size aligned to
584 * bits-per-long. But in the case where the kernel is 64-bit and
585 * the userspace is 32-bit, userspace can't align to the same
586 * bits-per-long, since sizeof(long) differs between kernel
587 * and user space. This way, userspace will provide a buffer which
588 * may be 4 bytes smaller than what the kernel will use, resulting in
589 * userspace memory corruption (which is not detectable by valgrind
590 * either, in most cases).
591 * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
592 * a hope that sizeof(long) won't become >8 any time soon.
594 * Note: the granule of kvm dirty log is qemu_real_host_page_size.
595 * And mem->memory_size is aligned to it (otherwise this mem can't
596 * be registered to KVM).
598 hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
599 /*HOST_LONG_BITS*/ 64) / 8;
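/*
 * For example, assuming 4 KiB host pages: a 1 GiB slot covers 262144
 * pages, and ALIGN(262144, 64) / 8 = 32768, i.e. a 32 KiB bitmap.
 */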
600 mem->dirty_bmap = g_malloc0(bitmap_size);
601 mem->dirty_bmap_size = bitmap_size;
605 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
606 * succeeded, false otherwise
608 static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
610 struct kvm_dirty_log d = {};
611 int ret;
613 d.dirty_bitmap = slot->dirty_bmap;
614 d.slot = slot->slot | (slot->as_id << 16);
615 ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
617 if (ret == -ENOENT) {
618 /* kernel does not have dirty bitmap in this slot */
619 ret = 0;
621 if (ret) {
622 error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
623 __func__, ret);
625 return ret == 0;
628 /* Must be called with the slots_lock held for all address spaces. */
629 static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
630 uint32_t slot_id, uint64_t offset)
632 KVMMemoryListener *kml;
633 KVMSlot *mem;
635 if (as_id >= s->nr_as) {
636 return;
639 kml = s->as[as_id].ml;
640 mem = &kml->slots[slot_id];
642 if (!mem->memory_size || offset >=
643 (mem->memory_size / qemu_real_host_page_size())) {
644 return;
647 set_bit(offset, mem->dirty_bmap);
650 static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
653 * Read the flags before the value. Pairs with barrier in
654 * KVM's kvm_dirty_ring_push() function.
656 return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
659 static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
662 * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
663 * sees the full content of the ring:
665 * CPU0 CPU1 CPU2
666 * ------------------------------------------------------------------------------
667 * fill gfn0
668 * store-rel flags for gfn0
669 * load-acq flags for gfn0
670 * store-rel RESET for gfn0
671 * ioctl(RESET_RINGS)
672 * load-acq flags for gfn0
673 * check if flags have RESET
675 * The synchronization goes from CPU2 to CPU0 to CPU1.
677 qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
681 * Must be called with the slots_lock held for all address spaces. It returns the
682 * dirty page we've collected on this dirty ring.
684 static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
686 struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
687 uint32_t ring_size = s->kvm_dirty_ring_size;
688 uint32_t count = 0, fetch = cpu->kvm_fetch_index;
691 * It's possible that we race with vcpu creation code where the vcpu is
692 * put onto the vcpus list but not yet initialized the dirty ring
693 * structures. If so, skip it.
695 if (!cpu->created) {
696 return 0;
699 assert(dirty_gfns && ring_size);
700 trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
702 while (true) {
703 cur = &dirty_gfns[fetch % ring_size];
704 if (!dirty_gfn_is_dirtied(cur)) {
705 break;
707 kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
708 cur->offset);
709 dirty_gfn_set_collected(cur);
710 trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
711 fetch++;
712 count++;
714 cpu->kvm_fetch_index = fetch;
715 cpu->dirty_pages += count;
717 return count;
720 /* Must be with slots_lock held */
721 static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
723 int ret;
724 uint64_t total = 0;
725 int64_t stamp;
727 stamp = get_clock();
729 if (cpu) {
730 total = kvm_dirty_ring_reap_one(s, cpu);
731 } else {
732 CPU_FOREACH(cpu) {
733 total += kvm_dirty_ring_reap_one(s, cpu);
737 if (total) {
738 ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
739 assert(ret == total);
742 stamp = get_clock() - stamp;
744 if (total) {
745 trace_kvm_dirty_ring_reap(total, stamp / 1000);
748 return total;
752 * Currently for simplicity, we must hold BQL before calling this. We can
753 * consider dropping the BQL once we're clear about all the race conditions.
755 static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
757 uint64_t total;
760 * We need to lock all kvm slots for all address spaces here,
761 * because:
763 * (1) We need to mark dirty for dirty bitmaps in multiple slots
764 * and for tons of pages, so it's better to take the lock here
765 * once rather than once per page. And more importantly,
767 * (2) We must _NOT_ publish dirty bits to the other threads
768 * (e.g., the migration thread) via the kvm memory slot dirty
769 * bitmaps before correctly re-protect those dirtied pages.
770 * Otherwise we can have potential risk of data corruption if
771 * the page data is read in the other thread before we do
772 * reset below.
774 kvm_slots_lock();
775 total = kvm_dirty_ring_reap_locked(s, cpu);
776 kvm_slots_unlock();
778 return total;
781 static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
783 /* No need to do anything */
787 * Kick all vcpus out in a synchronized way. When returned, we
788 * guarantee that every vcpu has been kicked and at least returned to
789 * userspace once.
791 static void kvm_cpu_synchronize_kick_all(void)
793 CPUState *cpu;
795 CPU_FOREACH(cpu) {
796 run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
801 * Flush all the existing dirty pages to the KVM slot buffers. When
802 * this call returns, we guarantee that all the touched dirty pages
803 * before calling this function have been put into the per-kvmslot
804 * dirty bitmap.
806 * This function must be called with BQL held.
808 static void kvm_dirty_ring_flush(void)
810 trace_kvm_dirty_ring_flush(0);
812 * The function needs to be serialized. Since this function
813 * should always be called with the BQL held, serialization is guaranteed.
814 * However, let's be sure of it.
816 assert(qemu_mutex_iothread_locked());
818 * First make sure to flush the hardware buffers by kicking all
819 * vcpus out in a synchronous way.
821 kvm_cpu_synchronize_kick_all();
822 kvm_dirty_ring_reap(kvm_state, NULL);
823 trace_kvm_dirty_ring_flush(1);
827 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
829 * This function will first try to fetch dirty bitmap from the kernel,
830 * and then updates qemu's dirty bitmap.
832 * NOTE: the caller must hold kml->slots_lock.
834 * @kml: the KVM memory listener object
835 * @section: the memory section to sync the dirty bitmap with
837 static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
838 MemoryRegionSection *section)
840 KVMState *s = kvm_state;
841 KVMSlot *mem;
842 hwaddr start_addr, size;
843 hwaddr slot_size;
845 size = kvm_align_section(section, &start_addr);
846 while (size) {
847 slot_size = MIN(kvm_max_slot_size, size);
848 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
849 if (!mem) {
850 /* We don't have a slot if we want to trap every access. */
851 return;
853 if (kvm_slot_get_dirty_log(s, mem)) {
854 kvm_slot_sync_dirty_pages(mem);
856 start_addr += slot_size;
857 size -= slot_size;
861 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
862 #define KVM_CLEAR_LOG_SHIFT 6
863 #define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
864 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
866 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
867 uint64_t size)
869 KVMState *s = kvm_state;
870 uint64_t end, bmap_start, start_delta, bmap_npages;
871 struct kvm_clear_dirty_log d;
872 unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
873 int ret;
876 * We need to extend either the start or the size or both to
877 * satisfy the KVM interface requirement. Firstly, do the start
878 * page alignment on 64 host pages
880 bmap_start = start & KVM_CLEAR_LOG_MASK;
881 start_delta = start - bmap_start;
882 bmap_start /= psize;
885 * The kernel interface has restriction on the size too, that either:
887 * (1) the size is 64 host pages aligned (just like the start), or
888 * (2) the size fills up until the end of the KVM memslot.
890 bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
891 << KVM_CLEAR_LOG_SHIFT;
892 end = mem->memory_size / psize;
893 if (bmap_npages > end - bmap_start) {
894 bmap_npages = end - bmap_start;
896 start_delta /= psize;
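/*
 * Worked example, assuming 4 KiB host pages (KVM_CLEAR_LOG_ALIGN is then
 * 256 KiB) and a memslot large enough that no clamping is needed:
 * clearing start=0x43000, size=0x20000 gives bmap_start = 0x40000 ->
 * page 64, start_delta = 0x3000 -> 3 pages, and bmap_npages = 64 (a
 * single 64-page granule covers the whole range).
 */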
899 * Prepare the bitmap to clear dirty bits. Here we must guarantee
900 * that we won't clear any unknown dirty bits otherwise we might
901 * accidentally clear some set bits which are not yet synced from
902 * the kernel into QEMU's bitmap, then we'll lose track of the
903 * guest modifications upon those pages (which can directly lead
904 * to guest data loss or panic after migration).
906 * Layout of the KVMSlot.dirty_bmap:
908 * |<-------- bmap_npages -----------..>|
909 * [1]
910 * start_delta size
911 * |----------------|-------------|------------------|------------|
912 * ^ ^ ^ ^
913 * | | | |
914 * start bmap_start (start) end
915 * of memslot of memslot
917 * [1] bmap_npages can be aligned to either 64 pages or the end of slot
920 assert(bmap_start % BITS_PER_LONG == 0);
921 /* We should never do log_clear before log_sync */
922 assert(mem->dirty_bmap);
923 if (start_delta || bmap_npages - size / psize) {
924 /* Slow path - we need to manipulate a temp bitmap */
925 bmap_clear = bitmap_new(bmap_npages);
926 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
927 bmap_start, start_delta + size / psize);
929 * We need to fill the holes at start because that was not
930 * specified by the caller and we extended the bitmap only for
931 * 64 pages alignment
933 bitmap_clear(bmap_clear, 0, start_delta);
934 d.dirty_bitmap = bmap_clear;
935 } else {
937 * Fast path - both start and size align well with BITS_PER_LONG
938 * (or the end of memory slot)
940 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
943 d.first_page = bmap_start;
944 /* It should never overflow. If it happens, say something */
945 assert(bmap_npages <= UINT32_MAX);
946 d.num_pages = bmap_npages;
947 d.slot = mem->slot | (as_id << 16);
949 ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
950 if (ret < 0 && ret != -ENOENT) {
951 error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
952 "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
953 __func__, d.slot, (uint64_t)d.first_page,
954 (uint32_t)d.num_pages, ret);
955 } else {
956 ret = 0;
957 trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
961 * After we have updated the remote dirty bitmap, we update the
962 * cached bitmap as well for the memslot, then if another user
963 * clears the same region we know we shouldn't clear it again on
964 * the remote otherwise it's data loss as well.
966 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
967 size / psize);
968 /* This handles the NULL case well */
969 g_free(bmap_clear);
970 return ret;
975 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
977 * NOTE: this will be a no-op if we haven't enabled manual dirty log
978 * protection in the host kernel because in that case this operation
979 * will be done within log_sync().
981 * @kml: the kvm memory listener
982 * @section: the memory range to clear dirty bitmap
984 static int kvm_physical_log_clear(KVMMemoryListener *kml,
985 MemoryRegionSection *section)
987 KVMState *s = kvm_state;
988 uint64_t start, size, offset, count;
989 KVMSlot *mem;
990 int ret = 0, i;
992 if (!s->manual_dirty_log_protect) {
993 /* No need to do explicit clear */
994 return ret;
997 start = section->offset_within_address_space;
998 size = int128_get64(section->size);
1000 if (!size) {
1001 /* Nothing more we can do... */
1002 return ret;
1005 kvm_slots_lock();
1007 for (i = 0; i < s->nr_slots; i++) {
1008 mem = &kml->slots[i];
1009 /* Discard slots that are empty or do not overlap the section */
1010 if (!mem->memory_size ||
1011 mem->start_addr > start + size - 1 ||
1012 start > mem->start_addr + mem->memory_size - 1) {
1013 continue;
1016 if (start >= mem->start_addr) {
1017 /* The slot starts before section or is aligned to it. */
1018 offset = start - mem->start_addr;
1019 count = MIN(mem->memory_size - offset, size);
1020 } else {
1021 /* The slot starts after section. */
1022 offset = 0;
1023 count = MIN(mem->memory_size, size - (mem->start_addr - start));
1025 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1026 if (ret < 0) {
1027 break;
1031 kvm_slots_unlock();
1033 return ret;
1036 static void kvm_coalesce_mmio_region(MemoryListener *listener,
1037 MemoryRegionSection *section,
1038 hwaddr start, hwaddr size)
1040 KVMState *s = kvm_state;
1042 if (s->coalesced_mmio) {
1043 struct kvm_coalesced_mmio_zone zone;
1045 zone.addr = start;
1046 zone.size = size;
1047 zone.pad = 0;
1049 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1053 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
1054 MemoryRegionSection *section,
1055 hwaddr start, hwaddr size)
1057 KVMState *s = kvm_state;
1059 if (s->coalesced_mmio) {
1060 struct kvm_coalesced_mmio_zone zone;
1062 zone.addr = start;
1063 zone.size = size;
1064 zone.pad = 0;
1066 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1070 static void kvm_coalesce_pio_add(MemoryListener *listener,
1071 MemoryRegionSection *section,
1072 hwaddr start, hwaddr size)
1074 KVMState *s = kvm_state;
1076 if (s->coalesced_pio) {
1077 struct kvm_coalesced_mmio_zone zone;
1079 zone.addr = start;
1080 zone.size = size;
1081 zone.pio = 1;
1083 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1087 static void kvm_coalesce_pio_del(MemoryListener *listener,
1088 MemoryRegionSection *section,
1089 hwaddr start, hwaddr size)
1091 KVMState *s = kvm_state;
1093 if (s->coalesced_pio) {
1094 struct kvm_coalesced_mmio_zone zone;
1096 zone.addr = start;
1097 zone.size = size;
1098 zone.pio = 1;
1100 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1104 static MemoryListener kvm_coalesced_pio_listener = {
1105 .name = "kvm-coalesced-pio",
1106 .coalesced_io_add = kvm_coalesce_pio_add,
1107 .coalesced_io_del = kvm_coalesce_pio_del,
1108 .priority = MEMORY_LISTENER_PRIORITY_MIN,
1111 int kvm_check_extension(KVMState *s, unsigned int extension)
1113 int ret;
1115 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1116 if (ret < 0) {
1117 ret = 0;
1120 return ret;
1123 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1125 int ret;
1127 ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1128 if (ret < 0) {
1129 /* VM wide version not implemented, use global one instead */
1130 ret = kvm_check_extension(s, extension);
1133 return ret;
1136 typedef struct HWPoisonPage {
1137 ram_addr_t ram_addr;
1138 QLIST_ENTRY(HWPoisonPage) list;
1139 } HWPoisonPage;
1141 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1142 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1144 static void kvm_unpoison_all(void *param)
1146 HWPoisonPage *page, *next_page;
1148 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1149 QLIST_REMOVE(page, list);
1150 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
1151 g_free(page);
1155 void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1157 HWPoisonPage *page;
1159 QLIST_FOREACH(page, &hwpoison_page_list, list) {
1160 if (page->ram_addr == ram_addr) {
1161 return;
1164 page = g_new(HWPoisonPage, 1);
1165 page->ram_addr = ram_addr;
1166 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1169 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1171 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
1172 /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
1173 * endianness, but the memory core hands them in target endianness.
1174 * For example, PPC is always treated as big-endian even if running
1175 * on KVM and on PPC64LE. Correct here.
1177 switch (size) {
1178 case 2:
1179 val = bswap16(val);
1180 break;
1181 case 4:
1182 val = bswap32(val);
1183 break;
1185 #endif
1186 return val;
1189 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1190 bool assign, uint32_t size, bool datamatch)
1192 int ret;
1193 struct kvm_ioeventfd iofd = {
1194 .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1195 .addr = addr,
1196 .len = size,
1197 .flags = 0,
1198 .fd = fd,
1201 trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1202 datamatch);
1203 if (!kvm_enabled()) {
1204 return -ENOSYS;
1207 if (datamatch) {
1208 iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1210 if (!assign) {
1211 iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1214 ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1216 if (ret < 0) {
1217 return -errno;
1220 return 0;
1223 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1224 bool assign, uint32_t size, bool datamatch)
1226 struct kvm_ioeventfd kick = {
1227 .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1228 .addr = addr,
1229 .flags = KVM_IOEVENTFD_FLAG_PIO,
1230 .len = size,
1231 .fd = fd,
1233 int r;
1234 trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1235 if (!kvm_enabled()) {
1236 return -ENOSYS;
1238 if (datamatch) {
1239 kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1241 if (!assign) {
1242 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1244 r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1245 if (r < 0) {
1246 return r;
1248 return 0;
1252 static int kvm_check_many_ioeventfds(void)
1254 /* Userspace can use ioeventfd for io notification. This requires a host
1255 * that supports eventfd(2) and an I/O thread; since eventfd does not
1256 * support SIGIO it cannot interrupt the vcpu.
1258 * Older kernels have a 6 device limit on the KVM io bus. Find out so we
1259 * can avoid creating too many ioeventfds.
1261 #if defined(CONFIG_EVENTFD)
1262 int ioeventfds[7];
1263 int i, ret = 0;
1264 for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
1265 ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
1266 if (ioeventfds[i] < 0) {
1267 break;
1269 ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
1270 if (ret < 0) {
1271 close(ioeventfds[i]);
1272 break;
1276 /* Decide whether many devices are supported or not */
1277 ret = i == ARRAY_SIZE(ioeventfds);
1279 while (i-- > 0) {
1280 kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
1281 close(ioeventfds[i]);
1283 return ret;
1284 #else
1285 return 0;
1286 #endif
1289 static const KVMCapabilityInfo *
1290 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1292 while (list->name) {
1293 if (!kvm_check_extension(s, list->value)) {
1294 return list;
1296 list++;
1298 return NULL;
1301 void kvm_set_max_memslot_size(hwaddr max_slot_size)
1303 g_assert(
1304 ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
1306 kvm_max_slot_size = max_slot_size;
1309 /* Called with KVMMemoryListener.slots_lock held */
1310 static void kvm_set_phys_mem(KVMMemoryListener *kml,
1311 MemoryRegionSection *section, bool add)
1313 KVMSlot *mem;
1314 int err;
1315 MemoryRegion *mr = section->mr;
1316 bool writable = !mr->readonly && !mr->rom_device;
1317 hwaddr start_addr, size, slot_size, mr_offset;
1318 ram_addr_t ram_start_offset;
1319 void *ram;
1321 if (!memory_region_is_ram(mr)) {
1322 if (writable || !kvm_readonly_mem_allowed) {
1323 return;
1324 } else if (!mr->romd_mode) {
1325 /* If the memory device is not in romd_mode, then we actually want
1326 * to remove the kvm memory slot so all accesses will trap. */
1327 add = false;
1331 size = kvm_align_section(section, &start_addr);
1332 if (!size) {
1333 return;
1336 /* The offset of the kvmslot within the memory region */
1337 mr_offset = section->offset_within_region + start_addr -
1338 section->offset_within_address_space;
1340 /* use aligned delta to align the ram address and offset */
1341 ram = memory_region_get_ram_ptr(mr) + mr_offset;
1342 ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1344 if (!add) {
1345 do {
1346 slot_size = MIN(kvm_max_slot_size, size);
1347 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1348 if (!mem) {
1349 return;
1351 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1353 * NOTE: We should be aware of the fact that here we're only
1354 * doing a best effort to sync dirty bits. No matter whether
1355 * we're using dirty log or dirty ring, we ignored two facts:
1357 * (1) dirty bits can reside in hardware buffers (PML)
1359 * (2) after we collected dirty bits here, pages can be dirtied
1360 * again before we do the final KVM_SET_USER_MEMORY_REGION to
1361 * remove the slot.
1363 * Not easy. Let's cross our fingers until it's fixed.
1365 if (kvm_state->kvm_dirty_ring_size) {
1366 kvm_dirty_ring_reap_locked(kvm_state, NULL);
1367 if (kvm_state->kvm_dirty_ring_with_bitmap) {
1368 kvm_slot_sync_dirty_pages(mem);
1369 kvm_slot_get_dirty_log(kvm_state, mem);
1371 } else {
1372 kvm_slot_get_dirty_log(kvm_state, mem);
1374 kvm_slot_sync_dirty_pages(mem);
1377 /* unregister the slot */
1378 g_free(mem->dirty_bmap);
1379 mem->dirty_bmap = NULL;
1380 mem->memory_size = 0;
1381 mem->flags = 0;
1382 err = kvm_set_user_memory_region(kml, mem, false);
1383 if (err) {
1384 fprintf(stderr, "%s: error unregistering slot: %s\n",
1385 __func__, strerror(-err));
1386 abort();
1388 start_addr += slot_size;
1389 size -= slot_size;
1390 } while (size);
1391 return;
1394 /* register the new slot */
1395 do {
1396 slot_size = MIN(kvm_max_slot_size, size);
1397 mem = kvm_alloc_slot(kml);
1398 mem->as_id = kml->as_id;
1399 mem->memory_size = slot_size;
1400 mem->start_addr = start_addr;
1401 mem->ram_start_offset = ram_start_offset;
1402 mem->ram = ram;
1403 mem->flags = kvm_mem_flags(mr);
1404 kvm_slot_init_dirty_bitmap(mem);
1405 err = kvm_set_user_memory_region(kml, mem, true);
1406 if (err) {
1407 fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1408 strerror(-err));
1409 abort();
1411 start_addr += slot_size;
1412 ram_start_offset += slot_size;
1413 ram += slot_size;
1414 size -= slot_size;
1415 } while (size);
1418 static void *kvm_dirty_ring_reaper_thread(void *data)
1420 KVMState *s = data;
1421 struct KVMDirtyRingReaper *r = &s->reaper;
1423 rcu_register_thread();
1425 trace_kvm_dirty_ring_reaper("init");
1427 while (true) {
1428 r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1429 trace_kvm_dirty_ring_reaper("wait");
1431 * TODO: provide a smarter timeout rather than a constant?
1433 sleep(1);
1435 /* keep sleeping so that the dirty limit is not disturbed by the reaper */
1436 if (dirtylimit_in_service()) {
1437 continue;
1440 trace_kvm_dirty_ring_reaper("wakeup");
1441 r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1443 qemu_mutex_lock_iothread();
1444 kvm_dirty_ring_reap(s, NULL);
1445 qemu_mutex_unlock_iothread();
1447 r->reaper_iteration++;
1450 trace_kvm_dirty_ring_reaper("exit");
1452 rcu_unregister_thread();
1454 return NULL;
1457 static int kvm_dirty_ring_reaper_init(KVMState *s)
1459 struct KVMDirtyRingReaper *r = &s->reaper;
1461 qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1462 kvm_dirty_ring_reaper_thread,
1463 s, QEMU_THREAD_JOINABLE);
1465 return 0;
1468 static int kvm_dirty_ring_init(KVMState *s)
1470 uint32_t ring_size = s->kvm_dirty_ring_size;
1471 uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
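/*
 * For example, a 4096-entry ring with 16-byte struct kvm_dirty_gfn
 * entries needs a 64 KiB mapping per vcpu.
 */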
1472 unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
1473 int ret;
1475 s->kvm_dirty_ring_size = 0;
1476 s->kvm_dirty_ring_bytes = 0;
1478 /* Bail if the dirty ring size isn't specified */
1479 if (!ring_size) {
1480 return 0;
1484 * Read the max supported pages. Fall back to dirty logging mode
1485 * if the dirty ring isn't supported.
1487 ret = kvm_vm_check_extension(s, capability);
1488 if (ret <= 0) {
1489 capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
1490 ret = kvm_vm_check_extension(s, capability);
1493 if (ret <= 0) {
1494 warn_report("KVM dirty ring not available, using bitmap method");
1495 return 0;
1498 if (ring_bytes > ret) {
1499 error_report("KVM dirty ring size %" PRIu32 " too big "
1500 "(maximum is %ld). Please use a smaller value.",
1501 ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
1502 return -EINVAL;
1505 ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
1506 if (ret) {
1507 error_report("Enabling of KVM dirty ring failed: %s. "
1508 "Suggested minimum value is 1024.", strerror(-ret));
1509 return -EIO;
1512 /* Enable the backup bitmap if it is supported */
1513 ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
1514 if (ret > 0) {
1515 ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
1516 if (ret) {
1517 error_report("Enabling of KVM dirty ring's backup bitmap failed: "
1518 "%s. ", strerror(-ret));
1519 return -EIO;
1522 s->kvm_dirty_ring_with_bitmap = true;
1525 s->kvm_dirty_ring_size = ring_size;
1526 s->kvm_dirty_ring_bytes = ring_bytes;
1528 return 0;
1531 static void kvm_region_add(MemoryListener *listener,
1532 MemoryRegionSection *section)
1534 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1535 KVMMemoryUpdate *update;
1537 update = g_new0(KVMMemoryUpdate, 1);
1538 update->section = *section;
1540 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
1543 static void kvm_region_del(MemoryListener *listener,
1544 MemoryRegionSection *section)
1546 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1547 KVMMemoryUpdate *update;
1549 update = g_new0(KVMMemoryUpdate, 1);
1550 update->section = *section;
1552 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
1555 static void kvm_region_commit(MemoryListener *listener)
1557 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
1558 listener);
1559 KVMMemoryUpdate *u1, *u2;
1560 bool need_inhibit = false;
1562 if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
1563 QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1564 return;
1568 * We have to be careful when regions to add overlap with ranges to remove.
1569 * We have to simulate atomic KVM memslot updates by making sure no ioctl()
1570 * is currently active.
1572 * The lists are ordered by address, so it's easy to find overlaps.
1574 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1575 u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
1576 while (u1 && u2) {
1577 Range r1, r2;
1579 range_init_nofail(&r1, u1->section.offset_within_address_space,
1580 int128_get64(u1->section.size));
1581 range_init_nofail(&r2, u2->section.offset_within_address_space,
1582 int128_get64(u2->section.size));
1584 if (range_overlaps_range(&r1, &r2)) {
1585 need_inhibit = true;
1586 break;
1588 if (range_lob(&r1) < range_lob(&r2)) {
1589 u1 = QSIMPLEQ_NEXT(u1, next);
1590 } else {
1591 u2 = QSIMPLEQ_NEXT(u2, next);
1595 kvm_slots_lock();
1596 if (need_inhibit) {
1597 accel_ioctl_inhibit_begin();
1600 /* Remove all memslots before adding the new ones. */
1601 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1602 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1603 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
1605 kvm_set_phys_mem(kml, &u1->section, false);
1606 memory_region_unref(u1->section.mr);
1608 g_free(u1);
1610 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
1611 u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
1612 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
1614 memory_region_ref(u1->section.mr);
1615 kvm_set_phys_mem(kml, &u1->section, true);
1617 g_free(u1);
1620 if (need_inhibit) {
1621 accel_ioctl_inhibit_end();
1623 kvm_slots_unlock();
1626 static void kvm_log_sync(MemoryListener *listener,
1627 MemoryRegionSection *section)
1629 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1631 kvm_slots_lock();
1632 kvm_physical_sync_dirty_bitmap(kml, section);
1633 kvm_slots_unlock();
1636 static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
1638 KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1639 KVMState *s = kvm_state;
1640 KVMSlot *mem;
1641 int i;
1643 /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1644 kvm_dirty_ring_flush();
1647 * TODO: make this faster when nr_slots is big while there are
1648 * only a few used slots (small VMs).
1650 kvm_slots_lock();
1651 for (i = 0; i < s->nr_slots; i++) {
1652 mem = &kml->slots[i];
1653 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1654 kvm_slot_sync_dirty_pages(mem);
1656 if (s->kvm_dirty_ring_with_bitmap && last_stage &&
1657 kvm_slot_get_dirty_log(s, mem)) {
1658 kvm_slot_sync_dirty_pages(mem);
1662 * This is not needed by KVM_GET_DIRTY_LOG because the
1663 * ioctl will unconditionally overwrite the whole region.
1664 * However kvm dirty ring has no such side effect.
1666 kvm_slot_reset_dirty_pages(mem);
1669 kvm_slots_unlock();
1672 static void kvm_log_clear(MemoryListener *listener,
1673 MemoryRegionSection *section)
1675 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1676 int r;
1678 r = kvm_physical_log_clear(kml, section);
1679 if (r < 0) {
1680 error_report_once("%s: kvm log clear failed: mr=%s "
1681 "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1682 section->mr->name, section->offset_within_region,
1683 int128_get64(section->size));
1684 abort();
1688 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1689 MemoryRegionSection *section,
1690 bool match_data, uint64_t data,
1691 EventNotifier *e)
1693 int fd = event_notifier_get_fd(e);
1694 int r;
1696 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1697 data, true, int128_get64(section->size),
1698 match_data);
1699 if (r < 0) {
1700 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1701 __func__, strerror(-r), -r);
1702 abort();
1706 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1707 MemoryRegionSection *section,
1708 bool match_data, uint64_t data,
1709 EventNotifier *e)
1711 int fd = event_notifier_get_fd(e);
1712 int r;
1714 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1715 data, false, int128_get64(section->size),
1716 match_data);
1717 if (r < 0) {
1718 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1719 __func__, strerror(-r), -r);
1720 abort();
1724 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1725 MemoryRegionSection *section,
1726 bool match_data, uint64_t data,
1727 EventNotifier *e)
1729 int fd = event_notifier_get_fd(e);
1730 int r;
1732 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1733 data, true, int128_get64(section->size),
1734 match_data);
1735 if (r < 0) {
1736 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1737 __func__, strerror(-r), -r);
1738 abort();
1742 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1743 MemoryRegionSection *section,
1744 bool match_data, uint64_t data,
1745 EventNotifier *e)
1748 int fd = event_notifier_get_fd(e);
1749 int r;
1751 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1752 data, false, int128_get64(section->size),
1753 match_data);
1754 if (r < 0) {
1755 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1756 __func__, strerror(-r), -r);
1757 abort();
1761 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1762 AddressSpace *as, int as_id, const char *name)
1764 int i;
1766 kml->slots = g_new0(KVMSlot, s->nr_slots);
1767 kml->as_id = as_id;
1769 for (i = 0; i < s->nr_slots; i++) {
1770 kml->slots[i].slot = i;
1773 QSIMPLEQ_INIT(&kml->transaction_add);
1774 QSIMPLEQ_INIT(&kml->transaction_del);
1776 kml->listener.region_add = kvm_region_add;
1777 kml->listener.region_del = kvm_region_del;
1778 kml->listener.commit = kvm_region_commit;
1779 kml->listener.log_start = kvm_log_start;
1780 kml->listener.log_stop = kvm_log_stop;
1781 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1782 kml->listener.name = name;
1784 if (s->kvm_dirty_ring_size) {
1785 kml->listener.log_sync_global = kvm_log_sync_global;
1786 } else {
1787 kml->listener.log_sync = kvm_log_sync;
1788 kml->listener.log_clear = kvm_log_clear;
1791 memory_listener_register(&kml->listener, as);
1793 for (i = 0; i < s->nr_as; ++i) {
1794 if (!s->as[i].as) {
1795 s->as[i].as = as;
1796 s->as[i].ml = kml;
1797 break;
1802 static MemoryListener kvm_io_listener = {
1803 .name = "kvm-io",
1804 .eventfd_add = kvm_io_ioeventfd_add,
1805 .eventfd_del = kvm_io_ioeventfd_del,
1806 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
1809 int kvm_set_irq(KVMState *s, int irq, int level)
1811 struct kvm_irq_level event;
1812 int ret;
1814 assert(kvm_async_interrupts_enabled());
1816 event.level = level;
1817 event.irq = irq;
1818 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1819 if (ret < 0) {
1820 perror("kvm_set_irq");
1821 abort();
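/*
 * With KVM_IRQ_LINE_STATUS (used when KVM_CAP_IRQ_INJECT_STATUS is
 * available), event.status reports whether the interrupt was actually
 * delivered or was coalesced by the kernel; plain KVM_IRQ_LINE carries
 * no status, so success (1) is assumed.
 */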
1824 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1827 #ifdef KVM_CAP_IRQ_ROUTING
1828 typedef struct KVMMSIRoute {
1829 struct kvm_irq_routing_entry kroute;
1830 QTAILQ_ENTRY(KVMMSIRoute) entry;
1831 } KVMMSIRoute;
1833 static void set_gsi(KVMState *s, unsigned int gsi)
1835 set_bit(gsi, s->used_gsi_bitmap);
1838 static void clear_gsi(KVMState *s, unsigned int gsi)
1840 clear_bit(gsi, s->used_gsi_bitmap);
1843 void kvm_init_irq_routing(KVMState *s)
1845 int gsi_count, i;
1847 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1848 if (gsi_count > 0) {
1849 /* Round up so we can search ints using ffs */
1850 s->used_gsi_bitmap = bitmap_new(gsi_count);
1851 s->gsi_count = gsi_count;
1854 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1855 s->nr_allocated_irq_routes = 0;
1857 if (!kvm_direct_msi_allowed) {
1858 for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1859 QTAILQ_INIT(&s->msi_hashtab[i]);
1863 kvm_arch_init_irq_routing(s);
1866 void kvm_irqchip_commit_routes(KVMState *s)
1868 int ret;
1870 if (kvm_gsi_direct_mapping()) {
1871 return;
1874 if (!kvm_gsi_routing_enabled()) {
1875 return;
1878 s->irq_routes->flags = 0;
1879 trace_kvm_irqchip_commit_routes();
1880 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1881 assert(ret == 0);
1884 static void kvm_add_routing_entry(KVMState *s,
1885 struct kvm_irq_routing_entry *entry)
1887 struct kvm_irq_routing_entry *new;
1888 int n, size;
1890 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1891 n = s->nr_allocated_irq_routes * 2;
1892 if (n < 64) {
1893 n = 64;
1895 size = sizeof(struct kvm_irq_routing);
1896 size += n * sizeof(*new);
1897 s->irq_routes = g_realloc(s->irq_routes, size);
1898 s->nr_allocated_irq_routes = n;
1900 n = s->irq_routes->nr++;
1901 new = &s->irq_routes->entries[n];
1903 *new = *entry;
1905 set_gsi(s, entry->gsi);
1908 static int kvm_update_routing_entry(KVMState *s,
1909 struct kvm_irq_routing_entry *new_entry)
1911 struct kvm_irq_routing_entry *entry;
1912 int n;
1914 for (n = 0; n < s->irq_routes->nr; n++) {
1915 entry = &s->irq_routes->entries[n];
1916 if (entry->gsi != new_entry->gsi) {
1917 continue;
1920 if(!memcmp(entry, new_entry, sizeof *entry)) {
1921 return 0;
1924 *entry = *new_entry;
1926 return 0;
1929 return -ESRCH;
1932 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1934 struct kvm_irq_routing_entry e = {};
1936 assert(pin < s->gsi_count);
1938 e.gsi = irq;
1939 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1940 e.flags = 0;
1941 e.u.irqchip.irqchip = irqchip;
1942 e.u.irqchip.pin = pin;
1943 kvm_add_routing_entry(s, &e);
1946 void kvm_irqchip_release_virq(KVMState *s, int virq)
1948 struct kvm_irq_routing_entry *e;
1949 int i;
1951 if (kvm_gsi_direct_mapping()) {
1952 return;
1955 for (i = 0; i < s->irq_routes->nr; i++) {
1956 e = &s->irq_routes->entries[i];
1957 if (e->gsi == virq) {
1958 s->irq_routes->nr--;
1959 *e = s->irq_routes->entries[s->irq_routes->nr];
1962 clear_gsi(s, virq);
1963 kvm_arch_release_virq_post(virq);
1964 trace_kvm_irqchip_release_virq(virq);
1967 void kvm_irqchip_add_change_notifier(Notifier *n)
1969 notifier_list_add(&kvm_irqchip_change_notifiers, n);
1972 void kvm_irqchip_remove_change_notifier(Notifier *n)
1974 notifier_remove(n);
1977 void kvm_irqchip_change_notify(void)
1979 notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1982 static unsigned int kvm_hash_msi(uint32_t data)
1984 /* This is optimized for IA32 MSI layout. However, no other arch shall
1985 * repeat the mistake of not providing a direct MSI injection API. */
1986 return data & 0xff;
1989 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1991 KVMMSIRoute *route, *next;
1992 unsigned int hash;
1994 for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1995 QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1996 kvm_irqchip_release_virq(s, route->kroute.gsi);
1997 QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1998 g_free(route);
2003 static int kvm_irqchip_get_virq(KVMState *s)
2005 int next_virq;
2008 * PIC and IOAPIC share the first 16 GSI numbers, thus the available
2009 * GSI numbers are more than the number of IRQ routes. Allocating a GSI
2010 * number can succeed even though a new route entry cannot be added.
2011 * When this happens, flush dynamic MSI entries to free IRQ route entries.
2013 if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
2014 kvm_flush_dynamic_msi_routes(s);
2017 /* Return the lowest unused GSI in the bitmap */
2018 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2019 if (next_virq >= s->gsi_count) {
2020 return -ENOSPC;
2021 } else {
2022 return next_virq;
2026 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
2028 unsigned int hash = kvm_hash_msi(msg.data);
2029 KVMMSIRoute *route;
2031 QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
2032 if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
2033 route->kroute.u.msi.address_hi == (msg.address >> 32) &&
2034 route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
2035 return route;
2038 return NULL;
2041 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2043 struct kvm_msi msi;
2044 KVMMSIRoute *route;
2046 if (kvm_direct_msi_allowed) {
2047 msi.address_lo = (uint32_t)msg.address;
2048 msi.address_hi = msg.address >> 32;
2049 msi.data = le32_to_cpu(msg.data);
2050 msi.flags = 0;
2051 memset(msi.pad, 0, sizeof(msi.pad));
2053 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2056 route = kvm_lookup_msi_route(s, msg);
2057 if (!route) {
2058 int virq;
2060 virq = kvm_irqchip_get_virq(s);
2061 if (virq < 0) {
2062 return virq;
2065 route = g_new0(KVMMSIRoute, 1);
2066 route->kroute.gsi = virq;
2067 route->kroute.type = KVM_IRQ_ROUTING_MSI;
2068 route->kroute.flags = 0;
2069 route->kroute.u.msi.address_lo = (uint32_t)msg.address;
2070 route->kroute.u.msi.address_hi = msg.address >> 32;
2071 route->kroute.u.msi.data = le32_to_cpu(msg.data);
2073 kvm_add_routing_entry(s, &route->kroute);
2074 kvm_irqchip_commit_routes(s);
2076 QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
2077 entry);
2080 assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
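/*
 * Without direct MSI injection the message is delivered by asserting the
 * GSI that was routed above; the route stays cached in msi_hashtab and
 * may later be recycled by kvm_flush_dynamic_msi_routes() when we run
 * out of GSIs.
 */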
2082 return kvm_set_irq(s, route->kroute.gsi, 1);
2085 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2087 struct kvm_irq_routing_entry kroute = {};
2088 int virq;
2089 KVMState *s = c->s;
2090 MSIMessage msg = {0, 0};
2092 if (pci_available && dev) {
2093 msg = pci_get_msi_message(dev, vector);
2096 if (kvm_gsi_direct_mapping()) {
2097 return kvm_arch_msi_data_to_gsi(msg.data);
2100 if (!kvm_gsi_routing_enabled()) {
2101 return -ENOSYS;
2104 virq = kvm_irqchip_get_virq(s);
2105 if (virq < 0) {
2106 return virq;
2109 kroute.gsi = virq;
2110 kroute.type = KVM_IRQ_ROUTING_MSI;
2111 kroute.flags = 0;
2112 kroute.u.msi.address_lo = (uint32_t)msg.address;
2113 kroute.u.msi.address_hi = msg.address >> 32;
2114 kroute.u.msi.data = le32_to_cpu(msg.data);
2115 if (pci_available && kvm_msi_devid_required()) {
2116 kroute.flags = KVM_MSI_VALID_DEVID;
2117 kroute.u.msi.devid = pci_requester_id(dev);
2119 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2120 kvm_irqchip_release_virq(s, virq);
2121 return -EINVAL;
2124 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2125 vector, virq);
2127 kvm_add_routing_entry(s, &kroute);
2128 kvm_arch_add_msi_route_post(&kroute, vector, dev);
2129 c->changes++;
2131 return virq;
2134 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2135 PCIDevice *dev)
2137 struct kvm_irq_routing_entry kroute = {};
2139 if (kvm_gsi_direct_mapping()) {
2140 return 0;
2143 if (!kvm_irqchip_in_kernel()) {
2144 return -ENOSYS;
2147 kroute.gsi = virq;
2148 kroute.type = KVM_IRQ_ROUTING_MSI;
2149 kroute.flags = 0;
2150 kroute.u.msi.address_lo = (uint32_t)msg.address;
2151 kroute.u.msi.address_hi = msg.address >> 32;
2152 kroute.u.msi.data = le32_to_cpu(msg.data);
2153 if (pci_available && kvm_msi_devid_required()) {
2154 kroute.flags = KVM_MSI_VALID_DEVID;
2155 kroute.u.msi.devid = pci_requester_id(dev);
2157 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2158 return -EINVAL;
2161 trace_kvm_irqchip_update_msi_route(virq);
2163 return kvm_update_routing_entry(s, &kroute);
2166 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2167 EventNotifier *resample, int virq,
2168 bool assign)
2170 int fd = event_notifier_get_fd(event);
2171 int rfd = resample ? event_notifier_get_fd(resample) : -1;
2173 struct kvm_irqfd irqfd = {
2174 .fd = fd,
2175 .gsi = virq,
2176 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2179 if (rfd != -1) {
2180 assert(assign);
2181 if (kvm_irqchip_is_split()) {
2183 * When the slow irqchip (e.g. IOAPIC) is in the
2184 * userspace, KVM kernel resamplefd will not work because
2185 * the EOI of the interrupt will be delivered to userspace
2186 * instead, so the KVM kernel resamplefd kick will be
2187 * skipped. The userspace here mimics what the kernel
2188 * provides with resamplefd by remembering the resamplefd and
2189 * kicking it when we receive the EOI of this IRQ.
2191 * This is hackery because IOAPIC is mostly bypassed
2192 * (except EOI broadcasts) when irqfd is used. However
2193 * this can bring much performance back for split irqchip
2194 * with INTx IRQs (for VFIO, this gives 93% perf of the
2195 * full fast path, which is a 46% perf boost compared to
2196 * the INTx slow path).
2198 kvm_resample_fd_insert(virq, resample);
2199 } else {
2200 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2201 irqfd.resamplefd = rfd;
2203 } else if (!assign) {
2204 if (kvm_irqchip_is_split()) {
2205 kvm_resample_fd_remove(virq);
2209 if (!kvm_irqfds_enabled()) {
2210 return -ENOSYS;
2213 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2216 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2218 struct kvm_irq_routing_entry kroute = {};
2219 int virq;
2221 if (!kvm_gsi_routing_enabled()) {
2222 return -ENOSYS;
2225 virq = kvm_irqchip_get_virq(s);
2226 if (virq < 0) {
2227 return virq;
2230 kroute.gsi = virq;
2231 kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
2232 kroute.flags = 0;
2233 kroute.u.adapter.summary_addr = adapter->summary_addr;
2234 kroute.u.adapter.ind_addr = adapter->ind_addr;
2235 kroute.u.adapter.summary_offset = adapter->summary_offset;
2236 kroute.u.adapter.ind_offset = adapter->ind_offset;
2237 kroute.u.adapter.adapter_id = adapter->adapter_id;
2239 kvm_add_routing_entry(s, &kroute);
2241 return virq;
2244 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2246 struct kvm_irq_routing_entry kroute = {};
2247 int virq;
2249 if (!kvm_gsi_routing_enabled()) {
2250 return -ENOSYS;
2252 if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
2253 return -ENOSYS;
2255 virq = kvm_irqchip_get_virq(s);
2256 if (virq < 0) {
2257 return virq;
2260 kroute.gsi = virq;
2261 kroute.type = KVM_IRQ_ROUTING_HV_SINT;
2262 kroute.flags = 0;
2263 kroute.u.hv_sint.vcpu = vcpu;
2264 kroute.u.hv_sint.sint = sint;
2266 kvm_add_routing_entry(s, &kroute);
2267 kvm_irqchip_commit_routes(s);
2269 return virq;
2272 #else /* !KVM_CAP_IRQ_ROUTING */
2274 void kvm_init_irq_routing(KVMState *s)
2278 void kvm_irqchip_release_virq(KVMState *s, int virq)
2282 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2284 abort();
2287 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2289 return -ENOSYS;
2292 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2294 return -ENOSYS;
2297 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2299 return -ENOSYS;
2302 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2303 EventNotifier *resample, int virq,
2304 bool assign)
2306 abort();
2309 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2310 PCIDevice *dev)
2311 return -ENOSYS;
2313 #endif /* !KVM_CAP_IRQ_ROUTING */
2315 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2316 EventNotifier *rn, int virq)
2318 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2321 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2322 int virq)
2324 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2327 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2328 EventNotifier *rn, qemu_irq irq)
2330 gpointer key, gsi;
2331 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2333 if (!found) {
2334 return -ENXIO;
2336 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2339 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2340 qemu_irq irq)
2342 gpointer key, gsi;
2343 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2345 if (!found) {
2346 return -ENXIO;
2348 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2351 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2353 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
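/*
 * Create the in-kernel irqchip if the kernel offers one, then initialize GSI
 * routing and the qemu_irq -> GSI mapping table.
 */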
2356 static void kvm_irqchip_create(KVMState *s)
2358 int ret;
2360 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2361 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2363 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2364 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2365 if (ret < 0) {
2366 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2367 exit(1);
2369 } else {
2370 return;
2373 /* First probe and see if there's an arch-specific hook to create the
2374 * in-kernel irqchip for us */
2375 ret = kvm_arch_irqchip_create(s);
2376 if (ret == 0) {
2377 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2378 error_report("Split IRQ chip mode not supported.");
2379 exit(1);
2380 } else {
2381 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2384 if (ret < 0) {
2385 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2386 exit(1);
2389 kvm_kernel_irqchip = true;
2390 /* If we have an in-kernel IRQ chip then we must have asynchronous
2391 * interrupt delivery (though the reverse is not necessarily true). */
2393 kvm_async_interrupts_allowed = true;
2394 kvm_halt_in_kernel_allowed = true;
2396 kvm_init_irq_routing(s);
2398 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2401 /* Find number of supported CPUs using the recommended
2402 * procedure from the kernel API documentation to cope with
2403 * older kernels that may be missing capabilities. */
2405 static int kvm_recommended_vcpus(KVMState *s)
2407 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2408 return (ret) ? ret : 4;
2411 static int kvm_max_vcpus(KVMState *s)
2413 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2414 return (ret) ? ret : kvm_recommended_vcpus(s);
2417 static int kvm_max_vcpu_id(KVMState *s)
2419 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2420 return (ret) ? ret : kvm_max_vcpus(s);
2423 bool kvm_vcpu_id_is_valid(int vcpu_id)
2425 KVMState *s = KVM_STATE(current_accel());
2426 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2429 bool kvm_dirty_ring_enabled(void)
2431 return kvm_state->kvm_dirty_ring_size ? true : false;
2434 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2435 strList *names, strList *targets, Error **errp);
2436 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2438 uint32_t kvm_dirty_ring_size(void)
2440 return kvm_state->kvm_dirty_ring_size;
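/*
 * Accelerator init: open /dev/kvm, create the VM, probe the capabilities we
 * rely on and register the KVM memory listeners.
 * Returns 0 on success or a negative errno on failure.
 */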
2443 static int kvm_init(MachineState *ms)
2445 MachineClass *mc = MACHINE_GET_CLASS(ms);
2446 static const char upgrade_note[] =
2447 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2448 "(see http://sourceforge.net/projects/kvm).\n";
2449 const struct {
2450 const char *name;
2451 int num;
2452 } num_cpus[] = {
2453 { "SMP", ms->smp.cpus },
2454 { "hotpluggable", ms->smp.max_cpus },
2455 { /* end of list */ }
2456 }, *nc = num_cpus;
2457 int soft_vcpus_limit, hard_vcpus_limit;
2458 KVMState *s;
2459 const KVMCapabilityInfo *missing_cap;
2460 int ret;
2461 int type;
2462 uint64_t dirty_log_manual_caps;
2464 qemu_mutex_init(&kml_slots_lock);
2466 s = KVM_STATE(ms->accelerator);
2468 /*
2469 * On systems where the kernel can support different base page
2470 * sizes, the host page size may differ from TARGET_PAGE_SIZE,
2471 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2472 * page size for the system though.
2473 */
2474 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2476 s->sigmask_len = 8;
2477 accel_blocker_init();
2479 #ifdef KVM_CAP_SET_GUEST_DEBUG
2480 QTAILQ_INIT(&s->kvm_sw_breakpoints);
2481 #endif
2482 QLIST_INIT(&s->kvm_parked_vcpus);
2483 s->fd = qemu_open_old("/dev/kvm", O_RDWR);
2484 if (s->fd == -1) {
2485 fprintf(stderr, "Could not access KVM kernel module: %m\n");
2486 ret = -errno;
2487 goto err;
2490 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2491 if (ret < KVM_API_VERSION) {
2492 if (ret >= 0) {
2493 ret = -EINVAL;
2495 fprintf(stderr, "kvm version too old\n");
2496 goto err;
2499 if (ret > KVM_API_VERSION) {
2500 ret = -EINVAL;
2501 fprintf(stderr, "kvm version not supported\n");
2502 goto err;
2505 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2506 s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2508 /* If unspecified, use the default value */
2509 if (!s->nr_slots) {
2510 s->nr_slots = 32;
2513 s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2514 if (s->nr_as <= 1) {
2515 s->nr_as = 1;
2517 s->as = g_new0(struct KVMAs, s->nr_as);
2519 if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2520 g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2521 "kvm-type",
2522 &error_abort);
2523 type = mc->kvm_type(ms, kvm_type);
2524 } else if (mc->kvm_type) {
2525 type = mc->kvm_type(ms, NULL);
2526 } else {
2527 type = kvm_arch_get_default_type(ms);
2530 if (type < 0) {
2531 ret = -EINVAL;
2532 goto err;
2535 do {
2536 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2537 } while (ret == -EINTR);
2539 if (ret < 0) {
2540 fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2541 strerror(-ret));
2543 #ifdef TARGET_S390X
2544 if (ret == -EINVAL) {
2545 fprintf(stderr,
2546 "Host kernel setup problem detected. Please verify:\n");
2547 fprintf(stderr, "- for kernels supporting the switch_amode or"
2548 " user_mode parameters, whether\n");
2549 fprintf(stderr,
2550 " user space is running in primary address space\n");
2551 fprintf(stderr,
2552 "- for kernels supporting the vm.allocate_pgste sysctl, "
2553 "whether it is enabled\n");
2555 #elif defined(TARGET_PPC)
2556 if (ret == -EINVAL) {
2557 fprintf(stderr,
2558 "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2559 (type == 2) ? "pr" : "hv");
2561 #endif
2562 goto err;
2565 s->vmfd = ret;
2567 /* check the vcpu limits */
2568 soft_vcpus_limit = kvm_recommended_vcpus(s);
2569 hard_vcpus_limit = kvm_max_vcpus(s);
2571 while (nc->name) {
2572 if (nc->num > soft_vcpus_limit) {
2573 warn_report("Number of %s cpus requested (%d) exceeds "
2574 "the recommended number of cpus supported by KVM (%d)",
2575 nc->name, nc->num, soft_vcpus_limit);
2577 if (nc->num > hard_vcpus_limit) {
2578 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2579 "the maximum number of cpus supported by KVM (%d)\n",
2580 nc->name, nc->num, hard_vcpus_limit);
2581 exit(1);
2584 nc++;
2587 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2588 if (!missing_cap) {
2589 missing_cap =
2590 kvm_check_extension_list(s, kvm_arch_required_capabilities);
2592 if (missing_cap) {
2593 ret = -EINVAL;
2594 fprintf(stderr, "kvm does not support %s\n%s",
2595 missing_cap->name, upgrade_note);
2596 goto err;
2599 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2600 s->coalesced_pio = s->coalesced_mmio &&
2601 kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2603 /*
2604 * Enable KVM dirty ring if supported, otherwise fall back to
2605 * dirty logging mode
2606 */
2607 ret = kvm_dirty_ring_init(s);
2608 if (ret < 0) {
2609 goto err;
2612 /*
2613 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty ring is
2614 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET assumes that no
2615 * page is wr-protected initially, which is the opposite of how the kvm
2616 * dirty ring is used - the dirty ring requires all pages to be
2617 * wr-protected at the very beginning. Enabling this feature for the
2618 * dirty ring therefore causes data corruption.
2619 *
2620 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2621 * we may expect a higher stall time when starting migration. In the
2622 * future we can enable KVM_CLEAR_DIRTY_LOG to work with the dirty ring
2623 * too: instead of clearing the dirty bit, it can be a way to explicitly
2624 * wr-protect guest pages. */
2625 if (!s->kvm_dirty_ring_size) {
2626 dirty_log_manual_caps =
2627 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2628 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2629 KVM_DIRTY_LOG_INITIALLY_SET);
2630 s->manual_dirty_log_protect = dirty_log_manual_caps;
2631 if (dirty_log_manual_caps) {
2632 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2633 dirty_log_manual_caps);
2634 if (ret) {
2635 warn_report("Failed to enable capability %"PRIu64" of "
2636 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
2637 "Falling back to the legacy dirty log mode.",
2638 dirty_log_manual_caps);
2639 s->manual_dirty_log_protect = 0;
2644 #ifdef KVM_CAP_VCPU_EVENTS
2645 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2646 #endif
2648 s->robust_singlestep =
2649 kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2651 #ifdef KVM_CAP_DEBUGREGS
2652 s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2653 #endif
2655 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2657 #ifdef KVM_CAP_IRQ_ROUTING
2658 kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2659 #endif
2661 s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2663 s->irq_set_ioctl = KVM_IRQ_LINE;
2664 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2665 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2668 kvm_readonly_mem_allowed =
2669 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2671 kvm_eventfds_allowed =
2672 (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2674 kvm_irqfds_allowed =
2675 (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2677 kvm_resamplefds_allowed =
2678 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2680 kvm_vm_attributes_allowed =
2681 (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2683 kvm_ioeventfd_any_length_allowed =
2684 (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2686 #ifdef KVM_CAP_SET_GUEST_DEBUG
2687 kvm_has_guest_debug =
2688 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2689 #endif
2691 kvm_sstep_flags = 0;
2692 if (kvm_has_guest_debug) {
2693 kvm_sstep_flags = SSTEP_ENABLE;
2695 #if defined KVM_CAP_SET_GUEST_DEBUG2
2696 int guest_debug_flags =
2697 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2699 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2700 kvm_sstep_flags |= SSTEP_NOIRQ;
2702 #endif
2705 kvm_state = s;
2707 ret = kvm_arch_init(ms, s);
2708 if (ret < 0) {
2709 goto err;
2712 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2713 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2716 qemu_register_reset(kvm_unpoison_all, NULL);
2718 if (s->kernel_irqchip_allowed) {
2719 kvm_irqchip_create(s);
2722 if (kvm_eventfds_allowed) {
2723 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2724 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2726 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2727 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2729 kvm_memory_listener_register(s, &s->memory_listener,
2730 &address_space_memory, 0, "kvm-memory");
2731 if (kvm_eventfds_allowed) {
2732 memory_listener_register(&kvm_io_listener,
2733 &address_space_io);
2735 memory_listener_register(&kvm_coalesced_pio_listener,
2736 &address_space_io);
2738 s->many_ioeventfds = kvm_check_many_ioeventfds();
2740 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2741 if (!s->sync_mmu) {
2742 ret = ram_block_discard_disable(true);
2743 assert(!ret);
2746 if (s->kvm_dirty_ring_size) {
2747 ret = kvm_dirty_ring_reaper_init(s);
2748 if (ret) {
2749 goto err;
2753 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2754 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2755 query_stats_schemas_cb);
2758 return 0;
2760 err:
2761 assert(ret < 0);
2762 if (s->vmfd >= 0) {
2763 close(s->vmfd);
2765 if (s->fd != -1) {
2766 close(s->fd);
2768 g_free(s->memory_listener.slots);
2770 return ret;
2773 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2775 s->sigmask_len = sigmask_len;
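/* Complete a KVM_EXIT_IO by replaying each repetition into the I/O address space. */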
2778 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2779 int size, uint32_t count)
2781 int i;
2782 uint8_t *ptr = data;
2784 for (i = 0; i < count; i++) {
2785 address_space_rw(&address_space_io, port, attrs,
2786 ptr, size,
2787 direction == KVM_EXIT_IO_OUT);
2788 ptr += size;
2792 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2794 fprintf(stderr, "KVM internal error. Suberror: %d\n",
2795 run->internal.suberror);
2797 if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2798 int i;
2800 for (i = 0; i < run->internal.ndata; ++i) {
2801 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2802 i, (uint64_t)run->internal.data[i]);
2805 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2806 fprintf(stderr, "emulation failure\n");
2807 if (!kvm_arch_stop_on_emulation_error(cpu)) {
2808 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2809 return EXCP_INTERRUPT;
2812 /* FIXME: Should trigger a QMP message to let management know
2813 * something went wrong. */
2815 return -1;
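/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, writing each
 * buffered access back into the corresponding address space.
 */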
2818 void kvm_flush_coalesced_mmio_buffer(void)
2820 KVMState *s = kvm_state;
2822 if (!s || s->coalesced_flush_in_progress) {
2823 return;
2826 s->coalesced_flush_in_progress = true;
2828 if (s->coalesced_mmio_ring) {
2829 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2830 while (ring->first != ring->last) {
2831 struct kvm_coalesced_mmio *ent;
2833 ent = &ring->coalesced_mmio[ring->first];
2835 if (ent->pio == 1) {
2836 address_space_write(&address_space_io, ent->phys_addr,
2837 MEMTXATTRS_UNSPECIFIED, ent->data,
2838 ent->len);
2839 } else {
2840 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2842 smp_wmb();
2843 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2847 s->coalesced_flush_in_progress = false;
2850 bool kvm_cpu_check_are_resettable(void)
2852 return kvm_arch_cpu_check_are_resettable();
2855 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2857 if (!cpu->vcpu_dirty) {
2858 kvm_arch_get_registers(cpu);
2859 cpu->vcpu_dirty = true;
2863 void kvm_cpu_synchronize_state(CPUState *cpu)
2865 if (!cpu->vcpu_dirty) {
2866 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2870 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2872 kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2873 cpu->vcpu_dirty = false;
2876 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2878 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2881 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2883 kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2884 cpu->vcpu_dirty = false;
2887 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2889 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2892 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2894 cpu->vcpu_dirty = true;
2897 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2899 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2902 #ifdef KVM_HAVE_MCE_INJECTION
2903 static __thread void *pending_sigbus_addr;
2904 static __thread int pending_sigbus_code;
2905 static __thread bool have_sigbus_pending;
2906 #endif
2908 static void kvm_cpu_kick(CPUState *cpu)
2910 qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2913 static void kvm_cpu_kick_self(void)
2915 if (kvm_immediate_exit) {
2916 kvm_cpu_kick(current_cpu);
2917 } else {
2918 qemu_cpu_kick_self();
2922 static void kvm_eat_signals(CPUState *cpu)
2924 struct timespec ts = { 0, 0 };
2925 siginfo_t siginfo;
2926 sigset_t waitset;
2927 sigset_t chkset;
2928 int r;
2930 if (kvm_immediate_exit) {
2931 qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2932 /* Write kvm_run->immediate_exit before the cpu->exit_request
2933 * write in kvm_cpu_exec. */
2935 smp_wmb();
2936 return;
2939 sigemptyset(&waitset);
2940 sigaddset(&waitset, SIG_IPI);
2942 do {
2943 r = sigtimedwait(&waitset, &siginfo, &ts);
2944 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2945 perror("sigtimedwait");
2946 exit(1);
2949 r = sigpending(&chkset);
2950 if (r == -1) {
2951 perror("sigpending");
2952 exit(1);
2954 } while (sigismember(&chkset, SIG_IPI));
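/*
 * Main vCPU loop: enter the guest via KVM_RUN and dispatch each exit reason
 * until an error or an exception code breaks out. Runs largely without the
 * BQL; it is re-taken only where an exit handler needs it.
 */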
2957 int kvm_cpu_exec(CPUState *cpu)
2959 struct kvm_run *run = cpu->kvm_run;
2960 int ret, run_ret;
2962 DPRINTF("kvm_cpu_exec()\n");
2964 if (kvm_arch_process_async_events(cpu)) {
2965 qatomic_set(&cpu->exit_request, 0);
2966 return EXCP_HLT;
2969 qemu_mutex_unlock_iothread();
2970 cpu_exec_start(cpu);
2972 do {
2973 MemTxAttrs attrs;
2975 if (cpu->vcpu_dirty) {
2976 kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2977 cpu->vcpu_dirty = false;
2980 kvm_arch_pre_run(cpu, run);
2981 if (qatomic_read(&cpu->exit_request)) {
2982 DPRINTF("interrupt exit requested\n");
2983 /*
2984 * KVM requires us to reenter the kernel after IO exits to complete
2985 * instruction emulation. This self-signal will ensure that we
2986 * leave ASAP again.
2987 */
2988 kvm_cpu_kick_self();
2991 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2992 * Matching barrier in kvm_eat_signals. */
2994 smp_rmb();
2996 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2998 attrs = kvm_arch_post_run(cpu, run);
3000 #ifdef KVM_HAVE_MCE_INJECTION
3001 if (unlikely(have_sigbus_pending)) {
3002 qemu_mutex_lock_iothread();
3003 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3004 pending_sigbus_addr);
3005 have_sigbus_pending = false;
3006 qemu_mutex_unlock_iothread();
3008 #endif
3010 if (run_ret < 0) {
3011 if (run_ret == -EINTR || run_ret == -EAGAIN) {
3012 DPRINTF("io window exit\n");
3013 kvm_eat_signals(cpu);
3014 ret = EXCP_INTERRUPT;
3015 break;
3017 fprintf(stderr, "error: kvm run failed %s\n",
3018 strerror(-run_ret));
3019 #ifdef TARGET_PPC
3020 if (run_ret == -EBUSY) {
3021 fprintf(stderr,
3022 "This is probably because SMT is enabled.\n"
3023 "A VCPU can only run on primary threads with all "
3024 "secondary threads offline.\n");
3026 #endif
3027 ret = -1;
3028 break;
3031 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3032 switch (run->exit_reason) {
3033 case KVM_EXIT_IO:
3034 DPRINTF("handle_io\n");
3035 /* Called outside BQL */
3036 kvm_handle_io(run->io.port, attrs,
3037 (uint8_t *)run + run->io.data_offset,
3038 run->io.direction,
3039 run->io.size,
3040 run->io.count);
3041 ret = 0;
3042 break;
3043 case KVM_EXIT_MMIO:
3044 DPRINTF("handle_mmio\n");
3045 /* Called outside BQL */
3046 address_space_rw(&address_space_memory,
3047 run->mmio.phys_addr, attrs,
3048 run->mmio.data,
3049 run->mmio.len,
3050 run->mmio.is_write);
3051 ret = 0;
3052 break;
3053 case KVM_EXIT_IRQ_WINDOW_OPEN:
3054 DPRINTF("irq_window_open\n");
3055 ret = EXCP_INTERRUPT;
3056 break;
3057 case KVM_EXIT_SHUTDOWN:
3058 DPRINTF("shutdown\n");
3059 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3060 ret = EXCP_INTERRUPT;
3061 break;
3062 case KVM_EXIT_UNKNOWN:
3063 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3064 (uint64_t)run->hw.hardware_exit_reason);
3065 ret = -1;
3066 break;
3067 case KVM_EXIT_INTERNAL_ERROR:
3068 ret = kvm_handle_internal_error(cpu, run);
3069 break;
3070 case KVM_EXIT_DIRTY_RING_FULL:
3071 /*
3072 * We shouldn't continue if the dirty ring of this vCPU is still
3073 * full, so reap it (via KVM_RESET_DIRTY_RINGS) before resuming.
3074 */
3075 trace_kvm_dirty_ring_full(cpu->cpu_index);
3076 qemu_mutex_lock_iothread();
3077 /*
3078 * We throttle the vCPU by making it sleep once it exits the kernel
3079 * because its dirty ring is full. In the dirtylimit scenario,
3080 * reaping all vCPUs after a single vCPU's ring fills up would skip
3081 * that sleep, so only reap the vCPU whose ring is full.
3082 */
3083 if (dirtylimit_in_service()) {
3084 kvm_dirty_ring_reap(kvm_state, cpu);
3085 } else {
3086 kvm_dirty_ring_reap(kvm_state, NULL);
3088 qemu_mutex_unlock_iothread();
3089 dirtylimit_vcpu_execute(cpu);
3090 ret = 0;
3091 break;
3092 case KVM_EXIT_SYSTEM_EVENT:
3093 switch (run->system_event.type) {
3094 case KVM_SYSTEM_EVENT_SHUTDOWN:
3095 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3096 ret = EXCP_INTERRUPT;
3097 break;
3098 case KVM_SYSTEM_EVENT_RESET:
3099 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3100 ret = EXCP_INTERRUPT;
3101 break;
3102 case KVM_SYSTEM_EVENT_CRASH:
3103 kvm_cpu_synchronize_state(cpu);
3104 qemu_mutex_lock_iothread();
3105 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3106 qemu_mutex_unlock_iothread();
3107 ret = 0;
3108 break;
3109 default:
3110 DPRINTF("kvm_arch_handle_exit\n");
3111 ret = kvm_arch_handle_exit(cpu, run);
3112 break;
3114 break;
3115 default:
3116 DPRINTF("kvm_arch_handle_exit\n");
3117 ret = kvm_arch_handle_exit(cpu, run);
3118 break;
3120 } while (ret == 0);
3122 cpu_exec_end(cpu);
3123 qemu_mutex_lock_iothread();
3125 if (ret < 0) {
3126 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3127 vm_stop(RUN_STATE_INTERNAL_ERROR);
3130 qatomic_set(&cpu->exit_request, 0);
3131 return ret;
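/*
 * Thin ioctl() wrappers for the /dev/kvm, VM, vCPU and device file
 * descriptors below; they return the ioctl result, or -errno on failure.
 */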
3134 int kvm_ioctl(KVMState *s, int type, ...)
3136 int ret;
3137 void *arg;
3138 va_list ap;
3140 va_start(ap, type);
3141 arg = va_arg(ap, void *);
3142 va_end(ap);
3144 trace_kvm_ioctl(type, arg);
3145 ret = ioctl(s->fd, type, arg);
3146 if (ret == -1) {
3147 ret = -errno;
3149 return ret;
3152 int kvm_vm_ioctl(KVMState *s, int type, ...)
3154 int ret;
3155 void *arg;
3156 va_list ap;
3158 va_start(ap, type);
3159 arg = va_arg(ap, void *);
3160 va_end(ap);
3162 trace_kvm_vm_ioctl(type, arg);
3163 accel_ioctl_begin();
3164 ret = ioctl(s->vmfd, type, arg);
3165 accel_ioctl_end();
3166 if (ret == -1) {
3167 ret = -errno;
3169 return ret;
3172 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
3174 int ret;
3175 void *arg;
3176 va_list ap;
3178 va_start(ap, type);
3179 arg = va_arg(ap, void *);
3180 va_end(ap);
3182 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3183 accel_cpu_ioctl_begin(cpu);
3184 ret = ioctl(cpu->kvm_fd, type, arg);
3185 accel_cpu_ioctl_end(cpu);
3186 if (ret == -1) {
3187 ret = -errno;
3189 return ret;
3192 int kvm_device_ioctl(int fd, int type, ...)
3194 int ret;
3195 void *arg;
3196 va_list ap;
3198 va_start(ap, type);
3199 arg = va_arg(ap, void *);
3200 va_end(ap);
3202 trace_kvm_device_ioctl(fd, type, arg);
3203 accel_ioctl_begin();
3204 ret = ioctl(fd, type, arg);
3205 accel_ioctl_end();
3206 if (ret == -1) {
3207 ret = -errno;
3209 return ret;
3212 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3214 int ret;
3215 struct kvm_device_attr attribute = {
3216 .group = group,
3217 .attr = attr,
3220 if (!kvm_vm_attributes_allowed) {
3221 return 0;
3224 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3225 /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3226 return ret ? 0 : 1;
3229 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3231 struct kvm_device_attr attribute = {
3232 .group = group,
3233 .attr = attr,
3234 .flags = 0,
3237 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3240 int kvm_device_access(int fd, int group, uint64_t attr,
3241 void *val, bool write, Error **errp)
3243 struct kvm_device_attr kvmattr;
3244 int err;
3246 kvmattr.flags = 0;
3247 kvmattr.group = group;
3248 kvmattr.attr = attr;
3249 kvmattr.addr = (uintptr_t)val;
3251 err = kvm_device_ioctl(fd,
3252 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3253 &kvmattr);
3254 if (err < 0) {
3255 error_setg_errno(errp, -err,
3256 "KVM_%s_DEVICE_ATTR failed: Group %d "
3257 "attr 0x%016" PRIx64,
3258 write ? "SET" : "GET", group, attr);
3260 return err;
3263 bool kvm_has_sync_mmu(void)
3265 return kvm_state->sync_mmu;
3268 int kvm_has_vcpu_events(void)
3270 return kvm_state->vcpu_events;
3273 int kvm_has_robust_singlestep(void)
3275 return kvm_state->robust_singlestep;
3278 int kvm_has_debugregs(void)
3280 return kvm_state->debugregs;
3283 int kvm_max_nested_state_length(void)
3285 return kvm_state->max_nested_state_len;
3288 int kvm_has_many_ioeventfds(void)
3290 if (!kvm_enabled()) {
3291 return 0;
3293 return kvm_state->many_ioeventfds;
3296 int kvm_has_gsi_routing(void)
3298 #ifdef KVM_CAP_IRQ_ROUTING
3299 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3300 #else
3301 return false;
3302 #endif
3305 int kvm_has_intx_set_mask(void)
3307 return kvm_state->intx_set_mask;
3310 bool kvm_arm_supports_user_irq(void)
3312 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3315 #ifdef KVM_CAP_SET_GUEST_DEBUG
3316 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
3317 target_ulong pc)
3319 struct kvm_sw_breakpoint *bp;
3321 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3322 if (bp->pc == pc) {
3323 return bp;
3326 return NULL;
3329 int kvm_sw_breakpoints_active(CPUState *cpu)
3331 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3334 struct kvm_set_guest_debug_data {
3335 struct kvm_guest_debug dbg;
3336 int err;
3339 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3341 struct kvm_set_guest_debug_data *dbg_data =
3342 (struct kvm_set_guest_debug_data *) data.host_ptr;
3344 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3345 &dbg_data->dbg);
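/* Push the current guest-debug state (single-step, breakpoints) to KVM for 'cpu'. */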
3348 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3350 struct kvm_set_guest_debug_data data;
3352 data.dbg.control = reinject_trap;
3354 if (cpu->singlestep_enabled) {
3355 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3357 if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3358 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3361 kvm_arch_update_guest_debug(cpu, &data.dbg);
3363 run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3364 RUN_ON_CPU_HOST_PTR(&data));
3365 return data.err;
3368 bool kvm_supports_guest_debug(void)
3370 /* probed during kvm_init() */
3371 return kvm_has_guest_debug;
3374 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3376 struct kvm_sw_breakpoint *bp;
3377 int err;
3379 if (type == GDB_BREAKPOINT_SW) {
3380 bp = kvm_find_sw_breakpoint(cpu, addr);
3381 if (bp) {
3382 bp->use_count++;
3383 return 0;
3386 bp = g_new(struct kvm_sw_breakpoint, 1);
3387 bp->pc = addr;
3388 bp->use_count = 1;
3389 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3390 if (err) {
3391 g_free(bp);
3392 return err;
3395 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3396 } else {
3397 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3398 if (err) {
3399 return err;
3403 CPU_FOREACH(cpu) {
3404 err = kvm_update_guest_debug(cpu, 0);
3405 if (err) {
3406 return err;
3409 return 0;
3412 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3414 struct kvm_sw_breakpoint *bp;
3415 int err;
3417 if (type == GDB_BREAKPOINT_SW) {
3418 bp = kvm_find_sw_breakpoint(cpu, addr);
3419 if (!bp) {
3420 return -ENOENT;
3423 if (bp->use_count > 1) {
3424 bp->use_count--;
3425 return 0;
3428 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3429 if (err) {
3430 return err;
3433 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3434 g_free(bp);
3435 } else {
3436 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3437 if (err) {
3438 return err;
3442 CPU_FOREACH(cpu) {
3443 err = kvm_update_guest_debug(cpu, 0);
3444 if (err) {
3445 return err;
3448 return 0;
3451 void kvm_remove_all_breakpoints(CPUState *cpu)
3453 struct kvm_sw_breakpoint *bp, *next;
3454 KVMState *s = cpu->kvm_state;
3455 CPUState *tmpcpu;
3457 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3458 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3459 /* Try harder to find a CPU that currently sees the breakpoint. */
3460 CPU_FOREACH(tmpcpu) {
3461 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3462 break;
3466 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3467 g_free(bp);
3469 kvm_arch_remove_all_hw_breakpoints();
3471 CPU_FOREACH(cpu) {
3472 kvm_update_guest_debug(cpu, 0);
3476 #endif /* KVM_CAP_SET_GUEST_DEBUG */
3478 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3480 KVMState *s = kvm_state;
3481 struct kvm_signal_mask *sigmask;
3482 int r;
3484 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3486 sigmask->len = s->sigmask_len;
3487 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3488 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3489 g_free(sigmask);
3491 return r;
3494 static void kvm_ipi_signal(int sig)
3496 if (current_cpu) {
3497 assert(kvm_immediate_exit);
3498 kvm_cpu_kick(current_cpu);
3502 void kvm_init_cpu_signals(CPUState *cpu)
3504 int r;
3505 sigset_t set;
3506 struct sigaction sigact;
3508 memset(&sigact, 0, sizeof(sigact));
3509 sigact.sa_handler = kvm_ipi_signal;
3510 sigaction(SIG_IPI, &sigact, NULL);
3512 pthread_sigmask(SIG_BLOCK, NULL, &set);
3513 #if defined KVM_HAVE_MCE_INJECTION
3514 sigdelset(&set, SIGBUS);
3515 pthread_sigmask(SIG_SETMASK, &set, NULL);
3516 #endif
3517 sigdelset(&set, SIG_IPI);
3518 if (kvm_immediate_exit) {
3519 r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3520 } else {
3521 r = kvm_set_signal_mask(cpu, &set);
3523 if (r) {
3524 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3525 exit(1);
3529 /* Called asynchronously in VCPU thread. */
3530 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3532 #ifdef KVM_HAVE_MCE_INJECTION
3533 if (have_sigbus_pending) {
3534 return 1;
3536 have_sigbus_pending = true;
3537 pending_sigbus_addr = addr;
3538 pending_sigbus_code = code;
3539 qatomic_set(&cpu->exit_request, 1);
3540 return 0;
3541 #else
3542 return 1;
3543 #endif
3546 /* Called synchronously (via signalfd) in main thread. */
3547 int kvm_on_sigbus(int code, void *addr)
3549 #ifdef KVM_HAVE_MCE_INJECTION
3550 /* An action-required MCE kills the process if SIGBUS is blocked. Because
3551 * that's what happens in the I/O thread, where we handle MCEs via signalfd,
3552 * we can only get action-optional MCEs here. */
3554 assert(code != BUS_MCEERR_AR);
3555 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3556 return 0;
3557 #else
3558 return 1;
3559 #endif
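/*
 * Create an in-kernel device of the given type with KVM_CREATE_DEVICE, or
 * merely probe for it when 'test' is true. Returns the new device fd, 0 for
 * a successful probe, or a negative errno.
 */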
3562 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3564 int ret;
3565 struct kvm_create_device create_dev;
3567 create_dev.type = type;
3568 create_dev.fd = -1;
3569 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3571 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3572 return -ENOTSUP;
3575 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3576 if (ret) {
3577 return ret;
3580 return test ? 0 : create_dev.fd;
3583 bool kvm_device_supported(int vmfd, uint64_t type)
3585 struct kvm_create_device create_dev = {
3586 .type = type,
3587 .fd = -1,
3588 .flags = KVM_CREATE_DEVICE_TEST,
3591 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3592 return false;
3595 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3598 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3600 struct kvm_one_reg reg;
3601 int r;
3603 reg.id = id;
3604 reg.addr = (uintptr_t) source;
3605 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3606 if (r) {
3607 trace_kvm_failed_reg_set(id, strerror(-r));
3609 return r;
3612 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3614 struct kvm_one_reg reg;
3615 int r;
3617 reg.id = id;
3618 reg.addr = (uintptr_t) target;
3619 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3620 if (r) {
3621 trace_kvm_failed_reg_get(id, strerror(-r));
3623 return r;
3626 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3627 hwaddr start_addr, hwaddr size)
3629 KVMState *kvm = KVM_STATE(ms->accelerator);
3630 int i;
3632 for (i = 0; i < kvm->nr_as; ++i) {
3633 if (kvm->as[i].as == as && kvm->as[i].ml) {
3634 size = MIN(kvm_max_slot_size, size);
3635 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3636 start_addr, size);
3640 return false;
3643 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3644 const char *name, void *opaque,
3645 Error **errp)
3647 KVMState *s = KVM_STATE(obj);
3648 int64_t value = s->kvm_shadow_mem;
3650 visit_type_int(v, name, &value, errp);
3653 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3654 const char *name, void *opaque,
3655 Error **errp)
3657 KVMState *s = KVM_STATE(obj);
3658 int64_t value;
3660 if (s->fd != -1) {
3661 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3662 return;
3665 if (!visit_type_int(v, name, &value, errp)) {
3666 return;
3669 s->kvm_shadow_mem = value;
3672 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3673 const char *name, void *opaque,
3674 Error **errp)
3676 KVMState *s = KVM_STATE(obj);
3677 OnOffSplit mode;
3679 if (s->fd != -1) {
3680 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3681 return;
3684 if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3685 return;
3687 switch (mode) {
3688 case ON_OFF_SPLIT_ON:
3689 s->kernel_irqchip_allowed = true;
3690 s->kernel_irqchip_required = true;
3691 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3692 break;
3693 case ON_OFF_SPLIT_OFF:
3694 s->kernel_irqchip_allowed = false;
3695 s->kernel_irqchip_required = false;
3696 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3697 break;
3698 case ON_OFF_SPLIT_SPLIT:
3699 s->kernel_irqchip_allowed = true;
3700 s->kernel_irqchip_required = true;
3701 s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3702 break;
3703 default:
3704 /* The value was checked in visit_type_OnOffSplit() above. If
3705 * we get here, then something is wrong in QEMU. */
3707 abort();
3711 bool kvm_kernel_irqchip_allowed(void)
3713 return kvm_state->kernel_irqchip_allowed;
3716 bool kvm_kernel_irqchip_required(void)
3718 return kvm_state->kernel_irqchip_required;
3721 bool kvm_kernel_irqchip_split(void)
3723 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3726 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3727 const char *name, void *opaque,
3728 Error **errp)
3730 KVMState *s = KVM_STATE(obj);
3731 uint32_t value = s->kvm_dirty_ring_size;
3733 visit_type_uint32(v, name, &value, errp);
3736 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3737 const char *name, void *opaque,
3738 Error **errp)
3740 KVMState *s = KVM_STATE(obj);
3741 uint32_t value;
3743 if (s->fd != -1) {
3744 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3745 return;
3748 if (!visit_type_uint32(v, name, &value, errp)) {
3749 return;
3751 if (value & (value - 1)) {
3752 error_setg(errp, "dirty-ring-size must be a power of two.");
3753 return;
3756 s->kvm_dirty_ring_size = value;
3759 static void kvm_accel_instance_init(Object *obj)
3761 KVMState *s = KVM_STATE(obj);
3763 s->fd = -1;
3764 s->vmfd = -1;
3765 s->kvm_shadow_mem = -1;
3766 s->kernel_irqchip_allowed = true;
3767 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3768 /* KVM dirty ring is by default off */
3769 s->kvm_dirty_ring_size = 0;
3770 s->kvm_dirty_ring_with_bitmap = false;
3771 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
3772 s->notify_window = 0;
3773 s->xen_version = 0;
3774 s->xen_gnttab_max_frames = 64;
3775 s->xen_evtchn_max_pirq = 256;
3778 /*
3779 * kvm_gdbstub_sstep_flags():
3780 *
3781 * Returns: SSTEP_* flags that KVM supports for guest debug. The
3782 * support is probed during kvm_init().
3783 */
3784 static int kvm_gdbstub_sstep_flags(void)
3786 return kvm_sstep_flags;
3789 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3791 AccelClass *ac = ACCEL_CLASS(oc);
3792 ac->name = "KVM";
3793 ac->init_machine = kvm_init;
3794 ac->has_memory = kvm_accel_has_memory;
3795 ac->allowed = &kvm_allowed;
3796 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
3798 object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3799 NULL, kvm_set_kernel_irqchip,
3800 NULL, NULL);
3801 object_class_property_set_description(oc, "kernel-irqchip",
3802 "Configure KVM in-kernel irqchip");
3804 object_class_property_add(oc, "kvm-shadow-mem", "int",
3805 kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3806 NULL, NULL);
3807 object_class_property_set_description(oc, "kvm-shadow-mem",
3808 "KVM shadow MMU size");
3810 object_class_property_add(oc, "dirty-ring-size", "uint32",
3811 kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3812 NULL, NULL);
3813 object_class_property_set_description(oc, "dirty-ring-size",
3814 "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3816 kvm_arch_accel_class_init(oc);
3819 static const TypeInfo kvm_accel_type = {
3820 .name = TYPE_KVM_ACCEL,
3821 .parent = TYPE_ACCEL,
3822 .instance_init = kvm_accel_instance_init,
3823 .class_init = kvm_accel_class_init,
3824 .instance_size = sizeof(KVMState),
3827 static void kvm_type_init(void)
3829 type_register_static(&kvm_accel_type);
3832 type_init(kvm_type_init);
3834 typedef struct StatsArgs {
3835 union StatsResultsType {
3836 StatsResultList **stats;
3837 StatsSchemaList **schema;
3838 } result;
3839 strList *names;
3840 Error **errp;
3841 } StatsArgs;
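/*
 * Convert one binary stats descriptor plus its data into a Stats entry and
 * prepend it to 'stats_list'; descriptors with an unknown type, unit or base
 * are silently skipped.
 */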
3843 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
3844 uint64_t *stats_data,
3845 StatsList *stats_list,
3846 Error **errp)
3849 Stats *stats;
3850 uint64List *val_list = NULL;
3852 /* Only add stats that we understand. */
3853 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3854 case KVM_STATS_TYPE_CUMULATIVE:
3855 case KVM_STATS_TYPE_INSTANT:
3856 case KVM_STATS_TYPE_PEAK:
3857 case KVM_STATS_TYPE_LINEAR_HIST:
3858 case KVM_STATS_TYPE_LOG_HIST:
3859 break;
3860 default:
3861 return stats_list;
3864 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3865 case KVM_STATS_UNIT_NONE:
3866 case KVM_STATS_UNIT_BYTES:
3867 case KVM_STATS_UNIT_CYCLES:
3868 case KVM_STATS_UNIT_SECONDS:
3869 case KVM_STATS_UNIT_BOOLEAN:
3870 break;
3871 default:
3872 return stats_list;
3875 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3876 case KVM_STATS_BASE_POW10:
3877 case KVM_STATS_BASE_POW2:
3878 break;
3879 default:
3880 return stats_list;
3883 /* Alloc and populate data list */
3884 stats = g_new0(Stats, 1);
3885 stats->name = g_strdup(pdesc->name);
3886 stats->value = g_new0(StatsValue, 1);
3888 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
3889 stats->value->u.boolean = *stats_data;
3890 stats->value->type = QTYPE_QBOOL;
3891 } else if (pdesc->size == 1) {
3892 stats->value->u.scalar = *stats_data;
3893 stats->value->type = QTYPE_QNUM;
3894 } else {
3895 int i;
3896 for (i = 0; i < pdesc->size; i++) {
3897 QAPI_LIST_PREPEND(val_list, stats_data[i]);
3899 stats->value->u.list = val_list;
3900 stats->value->type = QTYPE_QLIST;
3903 QAPI_LIST_PREPEND(stats_list, stats);
3904 return stats_list;
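/*
 * Translate one KVM stats descriptor into a StatsSchemaValue list entry;
 * descriptors with an unknown type or unit leave the list unchanged.
 */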
3907 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
3908 StatsSchemaValueList *list,
3909 Error **errp)
3911 StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
3912 schema_entry->value = g_new0(StatsSchemaValue, 1);
3914 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3915 case KVM_STATS_TYPE_CUMULATIVE:
3916 schema_entry->value->type = STATS_TYPE_CUMULATIVE;
3917 break;
3918 case KVM_STATS_TYPE_INSTANT:
3919 schema_entry->value->type = STATS_TYPE_INSTANT;
3920 break;
3921 case KVM_STATS_TYPE_PEAK:
3922 schema_entry->value->type = STATS_TYPE_PEAK;
3923 break;
3924 case KVM_STATS_TYPE_LINEAR_HIST:
3925 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
3926 schema_entry->value->bucket_size = pdesc->bucket_size;
3927 schema_entry->value->has_bucket_size = true;
3928 break;
3929 case KVM_STATS_TYPE_LOG_HIST:
3930 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
3931 break;
3932 default:
3933 goto exit;
3936 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3937 case KVM_STATS_UNIT_NONE:
3938 break;
3939 case KVM_STATS_UNIT_BOOLEAN:
3940 schema_entry->value->has_unit = true;
3941 schema_entry->value->unit = STATS_UNIT_BOOLEAN;
3942 break;
3943 case KVM_STATS_UNIT_BYTES:
3944 schema_entry->value->has_unit = true;
3945 schema_entry->value->unit = STATS_UNIT_BYTES;
3946 break;
3947 case KVM_STATS_UNIT_CYCLES:
3948 schema_entry->value->has_unit = true;
3949 schema_entry->value->unit = STATS_UNIT_CYCLES;
3950 break;
3951 case KVM_STATS_UNIT_SECONDS:
3952 schema_entry->value->has_unit = true;
3953 schema_entry->value->unit = STATS_UNIT_SECONDS;
3954 break;
3955 default:
3956 goto exit;
3959 schema_entry->value->exponent = pdesc->exponent;
3960 if (pdesc->exponent) {
3961 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3962 case KVM_STATS_BASE_POW10:
3963 schema_entry->value->has_base = true;
3964 schema_entry->value->base = 10;
3965 break;
3966 case KVM_STATS_BASE_POW2:
3967 schema_entry->value->has_base = true;
3968 schema_entry->value->base = 2;
3969 break;
3970 default:
3971 goto exit;
3975 schema_entry->value->name = g_strdup(pdesc->name);
3976 schema_entry->next = list;
3977 return schema_entry;
3978 exit:
3979 g_free(schema_entry->value);
3980 g_free(schema_entry);
3981 return list;
3984 /* Cached stats descriptors */
3985 typedef struct StatsDescriptors {
3986 const char *ident; /* cache key, currently the StatsTarget */
3987 struct kvm_stats_desc *kvm_stats_desc;
3988 struct kvm_stats_header kvm_stats_header;
3989 QTAILQ_ENTRY(StatsDescriptors) next;
3990 } StatsDescriptors;
3992 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
3993 QTAILQ_HEAD_INITIALIZER(stats_descriptors);
3995 /*
3996 * Return the descriptors for 'target'; they have either already been read,
3997 * or are retrieved from 'stats_fd' and cached.
3998 */
3999 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
4000 Error **errp)
4002 StatsDescriptors *descriptors;
4003 const char *ident;
4004 struct kvm_stats_desc *kvm_stats_desc;
4005 struct kvm_stats_header *kvm_stats_header;
4006 size_t size_desc;
4007 ssize_t ret;
4009 ident = StatsTarget_str(target);
4010 QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
4011 if (g_str_equal(descriptors->ident, ident)) {
4012 return descriptors;
4016 descriptors = g_new0(StatsDescriptors, 1);
4018 /* Read stats header */
4019 kvm_stats_header = &descriptors->kvm_stats_header;
4020 ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
4021 if (ret != sizeof(*kvm_stats_header)) {
4022 error_setg(errp, "KVM stats: failed to read stats header: "
4023 "expected %zu actual %zu",
4024 sizeof(*kvm_stats_header), ret);
4025 g_free(descriptors);
4026 return NULL;
4028 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4030 /* Read stats descriptors */
4031 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
4032 ret = pread(stats_fd, kvm_stats_desc,
4033 size_desc * kvm_stats_header->num_desc,
4034 kvm_stats_header->desc_offset);
4036 if (ret != size_desc * kvm_stats_header->num_desc) {
4037 error_setg(errp, "KVM stats: failed to read stats descriptors: "
4038 "expected %zu actual %zu",
4039 size_desc * kvm_stats_header->num_desc, ret);
4040 g_free(descriptors);
4041 g_free(kvm_stats_desc);
4042 return NULL;
4044 descriptors->kvm_stats_desc = kvm_stats_desc;
4045 descriptors->ident = ident;
4046 QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
4047 return descriptors;
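/*
 * Read the binary stats data behind 'stats_fd' and convert it into a stats
 * list entry for the given VM or vCPU target.
 */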
4050 static void query_stats(StatsResultList **result, StatsTarget target,
4051 strList *names, int stats_fd, CPUState *cpu,
4052 Error **errp)
4054 struct kvm_stats_desc *kvm_stats_desc;
4055 struct kvm_stats_header *kvm_stats_header;
4056 StatsDescriptors *descriptors;
4057 g_autofree uint64_t *stats_data = NULL;
4058 struct kvm_stats_desc *pdesc;
4059 StatsList *stats_list = NULL;
4060 size_t size_desc, size_data = 0;
4061 ssize_t ret;
4062 int i;
4064 descriptors = find_stats_descriptors(target, stats_fd, errp);
4065 if (!descriptors) {
4066 return;
4069 kvm_stats_header = &descriptors->kvm_stats_header;
4070 kvm_stats_desc = descriptors->kvm_stats_desc;
4071 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4073 /* Tally the total data size */
4074 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4075 pdesc = (void *)kvm_stats_desc + i * size_desc;
4076 size_data += pdesc->size * sizeof(*stats_data);
4079 stats_data = g_malloc0(size_data);
4080 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
4082 if (ret != size_data) {
4083 error_setg(errp, "KVM stats: failed to read data: "
4084 "expected %zu actual %zu", size_data, ret);
4085 return;
4088 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4089 uint64_t *stats;
4090 pdesc = (void *)kvm_stats_desc + i * size_desc;
4092 /* Add entry to the list */
4093 stats = (void *)stats_data + pdesc->offset;
4094 if (!apply_str_list_filter(pdesc->name, names)) {
4095 continue;
4097 stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4100 if (!stats_list) {
4101 return;
4104 switch (target) {
4105 case STATS_TARGET_VM:
4106 add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4107 break;
4108 case STATS_TARGET_VCPU:
4109 add_stats_entry(result, STATS_PROVIDER_KVM,
4110 cpu->parent_obj.canonical_path,
4111 stats_list);
4112 break;
4113 default:
4114 g_assert_not_reached();
4118 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4119 int stats_fd, Error **errp)
4121 struct kvm_stats_desc *kvm_stats_desc;
4122 struct kvm_stats_header *kvm_stats_header;
4123 StatsDescriptors *descriptors;
4124 struct kvm_stats_desc *pdesc;
4125 StatsSchemaValueList *stats_list = NULL;
4126 size_t size_desc;
4127 int i;
4129 descriptors = find_stats_descriptors(target, stats_fd, errp);
4130 if (!descriptors) {
4131 return;
4134 kvm_stats_header = &descriptors->kvm_stats_header;
4135 kvm_stats_desc = descriptors->kvm_stats_desc;
4136 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4138 /* Read the schema data */
4139 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4140 pdesc = (void *)kvm_stats_desc + i * size_desc;
4141 stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4144 add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
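/* Collect the stats (or the stats schema) of a single vCPU from its stats fd. */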
4147 static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4149 int stats_fd = cpu->kvm_vcpu_stats_fd;
4150 Error *local_err = NULL;
4152 if (stats_fd == -1) {
4153 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4154 error_propagate(kvm_stats_args->errp, local_err);
4155 return;
4157 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4158 kvm_stats_args->names, stats_fd, cpu,
4159 kvm_stats_args->errp);
4162 static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4164 int stats_fd = cpu->kvm_vcpu_stats_fd;
4165 Error *local_err = NULL;
4167 if (stats_fd == -1) {
4168 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4169 error_propagate(kvm_stats_args->errp, local_err);
4170 return;
4172 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4173 kvm_stats_args->errp);
4176 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4177 strList *names, strList *targets, Error **errp)
4179 KVMState *s = kvm_state;
4180 CPUState *cpu;
4181 int stats_fd;
4183 switch (target) {
4184 case STATS_TARGET_VM:
4186 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4187 if (stats_fd == -1) {
4188 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4189 return;
4191 query_stats(result, target, names, stats_fd, NULL, errp);
4192 close(stats_fd);
4193 break;
4195 case STATS_TARGET_VCPU:
4197 StatsArgs stats_args;
4198 stats_args.result.stats = result;
4199 stats_args.names = names;
4200 stats_args.errp = errp;
4201 CPU_FOREACH(cpu) {
4202 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4203 continue;
4205 query_stats_vcpu(cpu, &stats_args);
4207 break;
4209 default:
4210 break;
4214 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4216 StatsArgs stats_args;
4217 KVMState *s = kvm_state;
4218 int stats_fd;
4220 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4221 if (stats_fd == -1) {
4222 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4223 return;
4225 query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4226 close(stats_fd);
4228 if (first_cpu) {
4229 stats_args.result.schema = result;
4230 stats_args.errp = errp;
4231 query_stats_schema_vcpu(first_cpu, &stats_args);