/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
 * Ordering of locks:
 *
 *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

static bool largepages_enabled = true;
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0)
		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");

	return index;
}
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}
/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and then cancelled, or
	 * 2. the work callback has executed.
	 *
	 * The first case ensures that the irq is disabled and no more events
	 * will happen.  In the second case the irq may still be enabled
	 * (e.g. for MSI), so we disable it here to prevent further events.
	 *
	 * Note this may result in a nested disable if the interrupt type is
	 * INTx, but that's OK since we are about to free it.
	 *
	 * If this function is part of VM destruction, please ensure that the
	 * kvm state is still valid up to this point, since we may also have
	 * to wait for interrupt_work to finish.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}
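/*
 * Illustrative summary (not a change to the code above): both branches follow
 * the same teardown order,
 *
 *	disable_irq_nosync(vector);		// stop new events
 *	cancel_work_sync(&interrupt_work);	// wait out a running handler
 *	free_irq(vector, dev);			// now safe to release the vector
 *
 * and only then is the MSI/MSI-X state for the device dropped.
 */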
static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}
static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}
static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}
#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return -EINVAL;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}
#endif
static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}
#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif /* KVM_CAP_DEVICE_DEASSIGNMENT */

#endif /* KVM_CAP_DEVICE_ASSIGNMENT */
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}
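/*
 * Illustrative sketch (not part of this file): a vcpu typically consumes a
 * request bit raised by make_all_cpus_request() in its architecture run
 * loop, roughly
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);		// arch-specific callback
 *
 * The exact consumer lives in the arch code; flush_guest_tlb above is only
 * a placeholder name for the pattern.
 */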
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
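/*
 * Illustrative sketch (not part of this file): the arch page-fault path
 * pairs with the seq/count updates above roughly like this (names are only
 * an example of the pattern; see the arch mmu code for the real thing):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep outside mmu_lock
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq)
 *		goto retry;			// an invalidation raced with us
 *	// safe to establish the spte here
 *	spin_unlock(&kvm->mmu_lock);
 */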
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_LIST_HEAD(&kvm->irq_routing);
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);


	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);

	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
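/*
 * Illustrative sketch (not part of this file): userspace normally reaches
 * this path through the KVM_SET_USER_MEMORY_REGION ioctl, e.g.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,	// page-aligned
 *		.userspace_addr  = (__u64)mem,	// page-aligned mmap()ed VA
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * The sanity checks at the top of __kvm_set_memory_region() reject
 * unaligned sizes/addresses and overlapping slots.
 */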
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
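/*
 * Worked example for the arithmetic above (numbers chosen for illustration):
 * a slot with base_gfn = 0x100 and userspace_addr = 0x7f0000000000 maps
 * gfn 0x105 to hva 0x7f0000000000 + (0x105 - 0x100) * 4096 = 0x7f0000005000.
 * A gfn outside every slot yields bad_hva(), which callers must check with
 * kvm_is_error_hva().
 */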
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
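/*
 * Worked example (illustration only): a 5000-byte read starting at
 * gpa 0x1f00 is split by next_segment() into a 256-byte chunk (0x1f00 up to
 * the page boundary), then a full 4096-byte page, then a final 648-byte
 * chunk, with gfn incremented and offset reset to 0 after each iteration.
 */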
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
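/*
 * Worked example (illustration only): for a slot with base_gfn = 0x100,
 * dirtying gfn 0x123 sets bit rel_gfn = 0x23 in the slot's dirty_bitmap,
 * i.e. bit 3 of byte 4.  kvm_get_dirty_log() later copies this bitmap to
 * userspace; it is cleared again by the architecture dirty-log code.
 */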
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}
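/*
 * Illustrative sketch (not part of this file): userspace maps the kvm_run
 * structure through the vcpu fd returned above, e.g.
 *
 *	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 * Page offset 0 is kvm_run, KVM_PIO_PAGE_OFFSET is the PIO data page, and
 * KVM_COALESCED_MMIO_PAGE_OFFSET (if configured) is the coalesced MMIO
 * ring, exactly as resolved by kvm_vcpu_fault() above.
 */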
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
*vcpu
, sigset_t
*sigset
)
1824 sigdelsetmask(sigset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1825 vcpu
->sigset_active
= 1;
1826 vcpu
->sigset
= *sigset
;
1828 vcpu
->sigset_active
= 0;
1832 #ifdef __KVM_HAVE_MSIX
1833 static int kvm_vm_ioctl_set_msix_nr(struct kvm
*kvm
,
1834 struct kvm_assigned_msix_nr
*entry_nr
)
1837 struct kvm_assigned_dev_kernel
*adev
;
1839 mutex_lock(&kvm
->lock
);
1841 adev
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
1842 entry_nr
->assigned_dev_id
);
1848 if (adev
->entries_nr
== 0) {
1849 adev
->entries_nr
= entry_nr
->entry_nr
;
1850 if (adev
->entries_nr
== 0 ||
1851 adev
->entries_nr
>= KVM_MAX_MSIX_PER_DEV
) {
1856 adev
->host_msix_entries
= kzalloc(sizeof(struct msix_entry
) *
1859 if (!adev
->host_msix_entries
) {
1863 adev
->guest_msix_entries
= kzalloc(
1864 sizeof(struct kvm_guest_msix_entry
) *
1865 entry_nr
->entry_nr
, GFP_KERNEL
);
1866 if (!adev
->guest_msix_entries
) {
1867 kfree(adev
->host_msix_entries
);
1871 } else /* Not allowed set MSI-X number twice */
1874 mutex_unlock(&kvm
->lock
);
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
					struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;

		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;

		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
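/*
 * Illustrative sketch (not part of this file): this is the path behind the
 * usual userspace bring-up sequence, e.g.
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 * The vm fd returned here accepts the VM-level ioctls handled by
 * kvm_vm_ioctl() above.
 */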
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
)
2445 int cpu
= raw_smp_processor_id();
2447 if (cpumask_test_cpu(cpu
, cpus_hardware_enabled
))
2449 cpumask_set_cpu(cpu
, cpus_hardware_enabled
);
2450 kvm_arch_hardware_enable(NULL
);
2453 static void hardware_disable(void *junk
)
2455 int cpu
= raw_smp_processor_id();
2457 if (!cpumask_test_cpu(cpu
, cpus_hardware_enabled
))
2459 cpumask_clear_cpu(cpu
, cpus_hardware_enabled
);
2460 kvm_arch_hardware_disable(NULL
);
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the system
	 * shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
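/*
 * Illustrative sketch (not part of this file): with debugfs mounted, each
 * entry created above is readable as a plain decimal counter, e.g.
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/kvm/remote_tlb_flush
 *
 * The file names come from the architecture's debugfs_entries[] table, so
 * the exact set of counters differs per arch; remote_tlb_flush is only an
 * example.
 */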
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
static pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);