/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}
unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}
/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}
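/*
 * A worked example (illustration only, assuming 4 KiB host pages): a
 * section at offset_within_address_space 0x1800 with size 0x3000 gives
 * aligned = 0x2000 and delta = 0x800, so *start = 0x2000 and the
 * returned size is (0x3000 - 0x800) & ~0xfff = 0x2000, i.e. exactly the
 * two whole host pages fully covered by the section.
 */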
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                              mem.memory_size, mem.userspace_addr, ret);
    if (ret < 0) {
        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                     __func__, mem.slot, slot->start_addr,
                     (uint64_t)mem.memory_size, strerror(errno));
    }
    return ret;
}
static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    DPRINTF("kvm_destroy_vcpu\n");

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}
static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}
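/*
 * Parked fds exist because KVM provides no ioctl to destroy a vcpu: on
 * unplug the fd is stashed on kvm_parked_vcpus (see do_kvm_destroy_vcpu()
 * above), and a later hotplug with the same vcpu_id reuses that fd here
 * instead of calling KVM_CREATE_VCPU again.
 */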
int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}
static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}
/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * too, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
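/*
 * Sizing illustration: a 4 GiB memslot with 4 KiB host pages covers
 * 1,048,576 pages; rounded up to a multiple of 64 bits, that is a
 * 131,072-byte (128 KiB) dirty bitmap, one bit per host page.
 */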
/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}
/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}
static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty page we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}
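/*
 * Note on the decoding above: each dirty gfn carries a 32-bit slot field
 * packing the address space id in the upper 16 bits and the slot number
 * in the lower 16 bits, mirroring the "slot | (as_id << 16)" encoding
 * used for KVM_SET_USER_MEMORY_REGION.  For example, cur->slot ==
 * 0x00010003 means slot 3 of address space 1.
 */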
/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider to drop the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protect those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}
static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(qemu_mutex_iothread_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}
/*
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
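/*
 * A worked example, assuming 4 KiB host pages (so KVM_CLEAR_LOG_ALIGN is
 * 64 pages = 256 KiB) and a memslot larger than 128 pages: clearing
 * start = 0x46000, size = 0x1e000 (pages 70..99) yields bmap_start =
 * page 64, start_delta = 6 pages and bmap_npages = 64.  start_delta is
 * non-zero, so the slow path copies the bits for pages 64..99 into a
 * temporary bitmap, zeroes the first 6 bits, and issues
 * KVM_CLEAR_DIRTY_LOG for the 64-page window starting at page 64; bits
 * the caller did not ask to clear stay zero in the temporary bitmap, so
 * those pages are left untouched.
 */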
/*
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
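/*
 * For example, a 2-byte datamatch value of 0x1234 coming from a
 * big-endian target running on a little-endian host is byte-swapped to
 * 0x3412 before being handed to the kernel.
 */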
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
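/*
 * Usage sketch (hypothetical caller, not from this file): a device with
 * a doorbell at PIO port 0xc050 that wants a kernel-side kick when the
 * guest writes the 2-byte value 0 could wire an eventfd like this:
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     kvm_set_ioeventfd_pio(event_notifier_get_fd(&notifier),
 *                           0xc050, 0, true, 2, true);
 *
 * After that, the matching guest write signals the eventfd directly in
 * the kernel, avoiding a userspace vmexit round trip.
 */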
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}
/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }
        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}
static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the reaper does not interfere with dirtylimit */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        qemu_mutex_lock_iothread();
        kvm_dirty_ring_reap(s, NULL);
        qemu_mutex_unlock_iothread();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}
static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages. Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld). Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }
        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}
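/*
 * Sizing example: "-accel kvm,dirty-ring-size=4096" gives each vcpu a
 * ring of 4096 struct kvm_dirty_gfn entries of 16 bytes each, i.e.
 * ring_bytes = 64 KiB per vcpu.
 */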
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}

static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by addresses, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }

    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}
static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}
static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}
void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}
void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;

    msi.address_lo = (uint32_t)msg.address;
    msi.address_hi = msg.address >> 32;
    msi.data = le32_to_cpu(msg.data);
    msi.flags = 0;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    KVMState *s = c->s;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                    vector, virq);

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    c->changes++;

    return virq;
}
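/*
 * Caller's-eye sketch (assuming the usual KVMRouteChange helpers from
 * sysemu/kvm.h): MSI routes are added inside a route-change transaction
 * so that a batch of additions ends in a single KVM_SET_GSI_ROUTING:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     kvm_irqchip_commit_route_changes(&c);
 *
 * The commit helper only calls kvm_irqchip_commit_routes() when
 * c.changes is non-zero.
 */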
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}
static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in the
             * userspace, KVM kernel resamplefd will not work because
             * the EOI of the interrupt will be delivered to userspace
             * instead, so the KVM kernel resamplefd kick will be
             * skipped.  The userspace here mimics what the kernel
             * provides with resamplefd, remember the resamplefd and
             * kick it when we receive EOI of this IRQ.
             *
             * This is hackery because IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However
             * this can bring much performance back for split irqchip
             * with INTx IRQs (for VFIO, this gives 93% perf of the
             * full fast path, which is 46% perf boost comparing to
             * the INTx slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}
#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}

#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
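/*
 * Wiring sketch (hypothetical caller, e.g. a VFIO-like INTx path): a
 * device whose interrupt eventfd should be injected as virq, with a
 * second eventfd resampled on EOI, would use:
 *
 *     kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &intx_notifier,
 *                                        &resample_notifier, virq);
 *     ...
 *     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &intx_notifier,
 *                                           virq);
 *
 * With a full in-kernel irqchip the resample fd is handed to KVM
 * (KVM_IRQFD_FLAG_RESAMPLE); with a split irqchip it is tracked in
 * kvm_resample_fd_list and kicked from userspace by
 * kvm_resample_fd_notify() when the EOI arrives.
 */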
static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
        fprintf(stderr, "kvm: irqfd not implemented\n");
        exit(1);
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            error_report("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}
/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

bool kvm_dirty_ring_enabled(void)
{
    return kvm_state->kvm_dirty_ring_size ? true : false;
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp);
static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);

uint32_t kvm_dirty_ring_size(void)
{
    return kvm_state->kvm_dirty_ring_size;
}

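/*
 * Accelerator init entry point (installed as AccelClass::init_machine
 * below): opens /dev/kvm, creates the VM, probes capabilities, and
 * registers the memory listeners.  Returns 0 on success or a negative
 * errno, closing the fds again on failure.
 */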
static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    const struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP", ms->smp.cpus },
        { "hotpluggable", ms->smp.max_cpus },
        { /* end of list */ }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type;
    uint64_t dirty_log_manual_caps;

    qemu_mutex_init(&kml_slots_lock);
    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());

    s->sigmask_len = 8;
    accel_blocker_init();

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->fd = qemu_open_old("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
    if (s->nr_as <= 1) {
        s->nr_as = 1;
    }
    s->as = g_new0(struct KVMAs, s->nr_as);

    if (object_property_find(OBJECT(current_machine), "kvm-type")) {
        g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
                                                            "kvm-type",
                                                            &error_abort);
        type = mc->kvm_type(ms, kvm_type);
    } else if (mc->kvm_type) {
        type = mc->kvm_type(ms, NULL);
    } else {
        type = kvm_arch_get_default_type(ms);
    }

    if (type < 0) {
        ret = -EINVAL;
        goto err;
    }
    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#elif defined(TARGET_PPC)
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
                    (type == 2) ? "pr" : "hv");
        }
#endif
        goto err;
    }

    s->vmfd = ret;
    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            warn_report("Number of %s cpus requested (%d) exceeds "
                        "the recommended cpus supported by KVM (%d)",
                        nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
    /*
     * Enable KVM dirty ring if supported, otherwise fall back to
     * dirty logging mode
     */
    ret = kvm_dirty_ring_init(s);
    if (ret < 0) {
        goto err;
    }

    /*
     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty ring is
     * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
     * page is wr-protected initially, which is against how the kvm dirty ring
     * is used - the kvm dirty ring requires all pages to be wr-protected at
     * the very beginning.  Enabling this feature for the dirty ring causes
     * data corruption.
     *
     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
     * we may expect a higher stall time when starting the migration.  In the
     * future we can enable KVM_CLEAR_DIRTY_LOG to work with the dirty ring
     * too: instead of clearing the dirty bit, it can be a way to explicitly
     * wr-protect guest pages.
     */
    if (!s->kvm_dirty_ring_size) {
        dirty_log_manual_caps =
            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
        s->manual_dirty_log_protect = dirty_log_manual_caps;
        if (dirty_log_manual_caps) {
            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                    dirty_log_manual_caps);
            if (ret) {
                warn_report("Trying to enable capability %"PRIu64" of "
                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
                            "Falling back to the legacy mode.",
                            dirty_log_manual_caps);
                s->manual_dirty_log_protect = 0;
            }
        }
    }
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

#ifdef KVM_CAP_SET_GUEST_DEBUG
    kvm_has_guest_debug =
        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
#endif

    kvm_sstep_flags = 0;
    if (kvm_has_guest_debug) {
        kvm_sstep_flags = SSTEP_ENABLE;

#if defined KVM_CAP_SET_GUEST_DEBUG2
        int guest_debug_flags =
            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);

        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
            kvm_sstep_flags |= SSTEP_NOIRQ;
        }
#endif
    }

    kvm_state = s;

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0, "kvm-memory");
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    if (s->kvm_dirty_ring_size) {
        kvm_dirty_ring_reaper_init(s);
    }

    if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
        add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
                            query_stats_schemas_cb);
    }

    return 0;
err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->as);
    g_free(s->memory_listener.slots);

    return ret;
}

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

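/*
 * Replay a KVM_EXIT_IO access as 'count' sequential transactions of
 * 'size' bytes each on the I/O address space.  'data' points into the
 * shared kvm_run page (see the caller in kvm_cpu_exec()), so no copy
 * is needed.
 */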
static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    int i;

    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    for (i = 0; i < run->internal.ndata; ++i) {
        fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
                i, (uint64_t)run->internal.data[i]);
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

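/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, replaying
 * each buffered access.  The smp_wmb() makes sure the entry has been
 * consumed before the ring->first update hands the slot back to the
 * kernel.
 */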
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (!s || s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

bool kvm_cpu_check_are_resettable(void)
{
    return kvm_arch_cpu_check_are_resettable();
}

static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        int ret = kvm_arch_get_registers(cpu);
        if (ret) {
            error_report("Failed to get registers: %s", strerror(-ret));
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            vm_stop(RUN_STATE_INTERNAL_ERROR);
        }

        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    if (ret) {
        error_report("Failed to put registers after reset: %s", strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    if (ret) {
        error_report("Failed to put registers after init: %s", strerror(-ret));
        exit(1);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

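/*
 * Per-thread (__thread) state used to forward an MCE-related SIGBUS
 * from the vCPU thread to kvm_arch_on_sigbus_vcpu(); see
 * kvm_on_sigbus_vcpu() below and the consumer in kvm_cpu_exec().
 */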
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}

static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}

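/*
 * Body of the vCPU thread loop: flush dirty registers, enter the guest
 * with KVM_RUN outside the BQL, and dispatch on run->exit_reason until
 * a nonzero status is produced.  Returns an EXCP_* value, or -1 on a
 * fatal error (which stops the VM).
 */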
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            if (ret) {
                error_report("Failed to put registers: %s",
                             strerror(-ret));
                ret = -1;
                break;
            }

            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();
        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            qemu_mutex_lock_iothread();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            qemu_mutex_unlock_iothread();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }
        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_DIRTY_RING_FULL:
            /*
             * We shouldn't continue if the dirty ring of this vcpu is
             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
            qemu_mutex_lock_iothread();
            /*
             * We throttle the vCPU by making it sleep once it exits from
             * the kernel due to a full dirty ring.  In the dirtylimit
             * scenario, reaping all vCPUs after a single vCPU's dirty ring
             * gets full would miss that sleep, so just reap the vCPU whose
             * ring is full.
             */
            if (dirtylimit_in_service()) {
                kvm_dirty_ring_reap(kvm_state, cpu);
            } else {
                kvm_dirty_ring_reap(kvm_state, NULL);
            }
            qemu_mutex_unlock_iothread();
            dirtylimit_vcpu_execute(cpu);
            ret = 0;
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}

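/*
 * Thin ioctl wrappers for the system, VM, vCPU and device fds.  Each
 * traces the call and converts the -1/errno convention into a negative
 * errno return; the VM/vCPU/device variants also bracket the call with
 * accel_ioctl_begin()/accel_ioctl_end() so that the accel-blocker can
 * wait for in-flight ioctls.
 */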
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    accel_ioctl_begin();
    ret = ioctl(s->vmfd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    accel_cpu_ioctl_begin(cpu);
    ret = ioctl(cpu->kvm_fd, type, arg);
    accel_cpu_ioctl_end(cpu);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    accel_ioctl_begin();
    ret = ioctl(fd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

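/*
 * KVM_HAS_DEVICE_ATTR probes: both helpers return 1 if the group/attr
 * pair is supported and 0 otherwise.  Note the inverted mapping - the
 * ioctl itself reports success as 0.
 */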
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}

bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}

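/*
 * Guest-debug support: software breakpoints are tracked per-VM in
 * kvm_sw_breakpoints and re-armed on every CPU through
 * kvm_update_guest_debug(), which issues KVM_SET_GUEST_DEBUG on the
 * target vCPU's own thread via run_on_cpu().
 */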
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
        }
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}

bool kvm_supports_guest_debug(void)
{
    /* probed during kvm_init() */
    return kvm_has_guest_debug;
}

int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_new(struct kvm_sw_breakpoint, 1);
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#endif /* KVM_CAP_SET_GUEST_DEBUG */

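/*
 * Signal plumbing for the vCPU threads.  On kernels without
 * KVM_CAP_IMMEDIATE_EXIT, kicking a vCPU relies on SIG_IPI:
 * kvm_init_cpu_signals() installs the handler and the per-thread mask,
 * and kvm_set_signal_mask() tells KVM which signals may interrupt
 * KVM_RUN.
 */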
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

/* Called asynchronously in VCPU thread.  */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread.  */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}

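/*
 * Device-control API helpers.  kvm_create_device() returns the new
 * device fd (or 0 in test mode); kvm_device_supported() is a raw-fd
 * probe that avoids kvm_vm_ioctl() and only tests creation.
 */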
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}

int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}

static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}

static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}

static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above.  If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}

bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}

static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}

static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_with_bitmap = false;
    s->kvm_eager_split_size = 0;
    s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
    s->notify_window = 0;
    s->xen_version = 0;
    s->xen_gnttab_max_frames = 64;
    s->xen_evtchn_max_pirq = 256;
}

/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug. The
 * support is probed during kvm_init()
 */
static int kvm_gdbstub_sstep_flags(void)
{
    return kvm_sstep_flags;
}

static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
        NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    kvm_arch_accel_class_init(oc);
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);

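/*
 * Everything below implements the QMP query-stats interface on top of
 * KVM's binary stats file descriptors (KVM_GET_STATS_FD), registered
 * above via add_stats_callbacks() when the kernel advertises
 * KVM_CAP_BINARY_STATS_FD.
 */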
typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;

static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;

    /* Only add stats that we understand.  */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        int i;
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}

static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}

/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident; /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);

/*
 * Return the descriptors for 'target', which either have already been
 * read or are retrieved from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zd",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zd",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}

static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zd", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}

static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read the schema data for every descriptor */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}

static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
                       kvm_stats_args->errp);
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}

static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}