/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

static bool largepages_enabled = true;
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}
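/*
 * Summary of the flow above (descriptive comment added for clarity, not new
 * behaviour): the hard-irq handler kvm_assigned_dev_intr() only marks the
 * interrupt pending and schedules this work item; the actual injection into
 * the guest via kvm_set_irq() happens here, in process context, because
 * kvm->irq_lock is a mutex and may sleep.
 */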
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}
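/*
 * Descriptive note (added for clarity): for INTx the host line is left
 * disabled here (host_irq_disabled above) and is only re-enabled in
 * kvm_assigned_dev_ack_irq() once the guest acknowledges its interrupt;
 * otherwise a level-triggered line would keep firing on the host.
 */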
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared, so this ack may have come
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}
/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and then cancelled, or
	 * 2. the work callback was executed.
	 *
	 * The first case ensures the irq is disabled and no more events
	 * will arrive.  In the second case the irq may still be enabled
	 * (e.g. for MSI), so we disable it here to prevent further events.
	 *
	 * Note this may result in a nested disable if the interrupt type is
	 * INTx, but that is fine because we are about to free it.
	 *
	 * If this function is called as part of VM destruction, make sure
	 * the kvm state is still valid at this point, because we may also
	 * have to wait for interrupt_work to finish.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}
276 static int kvm_deassign_irq(struct kvm
*kvm
,
277 struct kvm_assigned_dev_kernel
*assigned_dev
,
278 unsigned long irq_requested_type
)
280 unsigned long guest_irq_type
, host_irq_type
;
282 if (!irqchip_in_kernel(kvm
))
284 /* no irq assignment to deassign */
285 if (!assigned_dev
->irq_requested_type
)
288 host_irq_type
= irq_requested_type
& KVM_DEV_IRQ_HOST_MASK
;
289 guest_irq_type
= irq_requested_type
& KVM_DEV_IRQ_GUEST_MASK
;
292 deassign_host_irq(kvm
, assigned_dev
);
294 deassign_guest_irq(kvm
, assigned_dev
);
299 static void kvm_free_assigned_irq(struct kvm
*kvm
,
300 struct kvm_assigned_dev_kernel
*assigned_dev
)
302 kvm_deassign_irq(kvm
, assigned_dev
, assigned_dev
->irq_requested_type
);
305 static void kvm_free_assigned_device(struct kvm
*kvm
,
306 struct kvm_assigned_dev_kernel
309 kvm_free_assigned_irq(kvm
, assigned_dev
);
311 pci_reset_function(assigned_dev
->dev
);
313 pci_release_regions(assigned_dev
->dev
);
314 pci_disable_device(assigned_dev
->dev
);
315 pci_dev_put(assigned_dev
->dev
);
317 list_del(&assigned_dev
->list
);
321 void kvm_free_all_assigned_devices(struct kvm
*kvm
)
323 struct list_head
*ptr
, *ptr2
;
324 struct kvm_assigned_dev_kernel
*assigned_dev
;
326 list_for_each_safe(ptr
, ptr2
, &kvm
->arch
.assigned_dev_head
) {
327 assigned_dev
= list_entry(ptr
,
328 struct kvm_assigned_dev_kernel
,
331 kvm_free_assigned_device(kvm
, assigned_dev
);
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif
373 #ifdef __KVM_HAVE_MSIX
374 static int assigned_device_enable_host_msix(struct kvm
*kvm
,
375 struct kvm_assigned_dev_kernel
*dev
)
379 /* host_msix_entries and guest_msix_entries should have been
381 if (dev
->entries_nr
== 0)
384 r
= pci_enable_msix(dev
->dev
, dev
->host_msix_entries
, dev
->entries_nr
);
388 for (i
= 0; i
< dev
->entries_nr
; i
++) {
389 r
= request_irq(dev
->host_msix_entries
[i
].vector
,
390 kvm_assigned_dev_intr
, 0,
391 "kvm_assigned_msix_device",
393 /* FIXME: free requested_irq's on failure */
static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
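/*
 * Descriptive note (added for clarity): for MSI and MSI-X the ack notifier
 * gsi is set to -1 because message-signalled interrupts are edge-triggered;
 * there is no EOI-driven unmask to perform, so kvm_assigned_dev_ack_irq()
 * returns early for these devices.
 */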
435 static int assign_host_irq(struct kvm
*kvm
,
436 struct kvm_assigned_dev_kernel
*dev
,
441 if (dev
->irq_requested_type
& KVM_DEV_IRQ_HOST_MASK
)
444 switch (host_irq_type
) {
445 case KVM_DEV_IRQ_HOST_INTX
:
446 r
= assigned_device_enable_host_intx(kvm
, dev
);
448 #ifdef __KVM_HAVE_MSI
449 case KVM_DEV_IRQ_HOST_MSI
:
450 r
= assigned_device_enable_host_msi(kvm
, dev
);
453 #ifdef __KVM_HAVE_MSIX
454 case KVM_DEV_IRQ_HOST_MSIX
:
455 r
= assigned_device_enable_host_msix(kvm
, dev
);
463 dev
->irq_requested_type
|= host_irq_type
;
468 static int assign_guest_irq(struct kvm
*kvm
,
469 struct kvm_assigned_dev_kernel
*dev
,
470 struct kvm_assigned_irq
*irq
,
471 unsigned long guest_irq_type
)
476 if (dev
->irq_requested_type
& KVM_DEV_IRQ_GUEST_MASK
)
479 id
= kvm_request_irq_source_id(kvm
);
483 dev
->irq_source_id
= id
;
485 switch (guest_irq_type
) {
486 case KVM_DEV_IRQ_GUEST_INTX
:
487 r
= assigned_device_enable_guest_intx(kvm
, dev
, irq
);
489 #ifdef __KVM_HAVE_MSI
490 case KVM_DEV_IRQ_GUEST_MSI
:
491 r
= assigned_device_enable_guest_msi(kvm
, dev
, irq
);
494 #ifdef __KVM_HAVE_MSIX
495 case KVM_DEV_IRQ_GUEST_MSIX
:
496 r
= assigned_device_enable_guest_msix(kvm
, dev
, irq
);
504 dev
->irq_requested_type
|= guest_irq_type
;
505 kvm_register_irq_ack_notifier(kvm
, &dev
->ack_notifier
);
507 kvm_free_irq_source_id(kvm
, dev
->irq_source_id
);
512 /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
513 static int kvm_vm_ioctl_assign_irq(struct kvm
*kvm
,
514 struct kvm_assigned_irq
*assigned_irq
)
517 struct kvm_assigned_dev_kernel
*match
;
518 unsigned long host_irq_type
, guest_irq_type
;
520 if (!capable(CAP_SYS_RAWIO
))
523 if (!irqchip_in_kernel(kvm
))
526 mutex_lock(&kvm
->lock
);
528 match
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
529 assigned_irq
->assigned_dev_id
);
533 host_irq_type
= (assigned_irq
->flags
& KVM_DEV_IRQ_HOST_MASK
);
534 guest_irq_type
= (assigned_irq
->flags
& KVM_DEV_IRQ_GUEST_MASK
);
537 /* can only assign one type at a time */
538 if (hweight_long(host_irq_type
) > 1)
540 if (hweight_long(guest_irq_type
) > 1)
542 if (host_irq_type
== 0 && guest_irq_type
== 0)
547 r
= assign_host_irq(kvm
, match
, host_irq_type
);
552 r
= assign_guest_irq(kvm
, match
, assigned_irq
, guest_irq_type
);
554 mutex_unlock(&kvm
->lock
);
558 static int kvm_vm_ioctl_deassign_dev_irq(struct kvm
*kvm
,
559 struct kvm_assigned_irq
563 struct kvm_assigned_dev_kernel
*match
;
565 mutex_lock(&kvm
->lock
);
567 match
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
568 assigned_irq
->assigned_dev_id
);
572 r
= kvm_deassign_irq(kvm
, match
, assigned_irq
->flags
);
574 mutex_unlock(&kvm
->lock
);
578 static int kvm_vm_ioctl_assign_device(struct kvm
*kvm
,
579 struct kvm_assigned_pci_dev
*assigned_dev
)
582 struct kvm_assigned_dev_kernel
*match
;
585 down_read(&kvm
->slots_lock
);
586 mutex_lock(&kvm
->lock
);
588 match
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
589 assigned_dev
->assigned_dev_id
);
591 /* device already assigned */
596 match
= kzalloc(sizeof(struct kvm_assigned_dev_kernel
), GFP_KERNEL
);
598 printk(KERN_INFO
"%s: Couldn't allocate memory\n",
603 dev
= pci_get_bus_and_slot(assigned_dev
->busnr
,
604 assigned_dev
->devfn
);
606 printk(KERN_INFO
"%s: host device not found\n", __func__
);
610 if (pci_enable_device(dev
)) {
611 printk(KERN_INFO
"%s: Could not enable PCI device\n", __func__
);
615 r
= pci_request_regions(dev
, "kvm_assigned_device");
617 printk(KERN_INFO
"%s: Could not get access to device regions\n",
622 pci_reset_function(dev
);
624 match
->assigned_dev_id
= assigned_dev
->assigned_dev_id
;
625 match
->host_busnr
= assigned_dev
->busnr
;
626 match
->host_devfn
= assigned_dev
->devfn
;
627 match
->flags
= assigned_dev
->flags
;
629 spin_lock_init(&match
->assigned_dev_lock
);
630 match
->irq_source_id
= -1;
632 match
->ack_notifier
.irq_acked
= kvm_assigned_dev_ack_irq
;
633 INIT_WORK(&match
->interrupt_work
,
634 kvm_assigned_dev_interrupt_work_handler
);
636 list_add(&match
->list
, &kvm
->arch
.assigned_dev_head
);
638 if (assigned_dev
->flags
& KVM_DEV_ASSIGN_ENABLE_IOMMU
) {
639 if (!kvm
->arch
.iommu_domain
) {
640 r
= kvm_iommu_map_guest(kvm
);
644 r
= kvm_assign_device(kvm
, match
);
650 mutex_unlock(&kvm
->lock
);
651 up_read(&kvm
->slots_lock
);
654 list_del(&match
->list
);
655 pci_release_regions(dev
);
657 pci_disable_device(dev
);
662 mutex_unlock(&kvm
->lock
);
663 up_read(&kvm
->slots_lock
);
668 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
669 static int kvm_vm_ioctl_deassign_device(struct kvm
*kvm
,
670 struct kvm_assigned_pci_dev
*assigned_dev
)
673 struct kvm_assigned_dev_kernel
*match
;
675 mutex_lock(&kvm
->lock
);
677 match
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
678 assigned_dev
->assigned_dev_id
);
680 printk(KERN_INFO
"%s: device hasn't been assigned before, "
681 "so cannot be deassigned\n", __func__
);
686 if (match
->flags
& KVM_DEV_ASSIGN_ENABLE_IOMMU
)
687 kvm_deassign_device(kvm
, match
);
689 kvm_free_assigned_device(kvm
, match
);
692 mutex_unlock(&kvm
->lock
);
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
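/*
 * Typical usage of the pair above (illustrative sketch only, not code from
 * this file): an ioctl handler brackets access to vcpu state with
 * vcpu_load()/vcpu_put(), so the vcpu is loaded on the current CPU and the
 * preempt notifiers can save/restore its state across preemption:
 *
 *	vcpu_load(vcpu);
 *	r = do_something_with(vcpu);	// hypothetical helper
 *	vcpu_put(vcpu);
 */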
730 static void ack_flush(void *_completed
)
734 static bool make_all_cpus_request(struct kvm
*kvm
, unsigned int req
)
739 struct kvm_vcpu
*vcpu
;
741 zalloc_cpumask_var(&cpus
, GFP_ATOMIC
);
743 spin_lock(&kvm
->requests_lock
);
744 me
= smp_processor_id();
745 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
746 if (test_and_set_bit(req
, &vcpu
->requests
))
749 if (cpus
!= NULL
&& cpu
!= -1 && cpu
!= me
)
750 cpumask_set_cpu(cpu
, cpus
);
752 if (unlikely(cpus
== NULL
))
753 smp_call_function_many(cpu_online_mask
, ack_flush
, NULL
, 1);
754 else if (!cpumask_empty(cpus
))
755 smp_call_function_many(cpus
, ack_flush
, NULL
, 1);
758 spin_unlock(&kvm
->requests_lock
);
759 free_cpumask_var(cpus
);
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
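/*
 * Descriptive note (added for clarity): both helpers above rely on
 * make_all_cpus_request(), which sets a request bit on every vcpu and sends
 * an IPI to the CPUs currently running a vcpu, so each vcpu notices the
 * request (e.g. KVM_REQ_TLB_FLUSH) before it re-enters the guest.
 */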
774 int kvm_vcpu_init(struct kvm_vcpu
*vcpu
, struct kvm
*kvm
, unsigned id
)
779 mutex_init(&vcpu
->mutex
);
783 init_waitqueue_head(&vcpu
->wq
);
785 page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
);
790 vcpu
->run
= page_address(page
);
792 r
= kvm_arch_vcpu_init(vcpu
);
798 free_page((unsigned long)vcpu
->run
);
802 EXPORT_SYMBOL_GPL(kvm_vcpu_init
);
804 void kvm_vcpu_uninit(struct kvm_vcpu
*vcpu
)
806 kvm_arch_vcpu_uninit(vcpu
);
807 free_page((unsigned long)vcpu
->run
);
809 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit
);
811 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
812 static inline struct kvm
*mmu_notifier_to_kvm(struct mmu_notifier
*mn
)
814 return container_of(mn
, struct kvm
, mmu_notifier
);
817 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier
*mn
,
818 struct mm_struct
*mm
,
819 unsigned long address
)
821 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
842 spin_lock(&kvm
->mmu_lock
);
843 kvm
->mmu_notifier_seq
++;
844 need_tlb_flush
= kvm_unmap_hva(kvm
, address
);
845 spin_unlock(&kvm
->mmu_lock
);
847 /* we've to flush the tlb before the pages can be freed */
849 kvm_flush_remote_tlbs(kvm
);
853 static void kvm_mmu_notifier_change_pte(struct mmu_notifier
*mn
,
854 struct mm_struct
*mm
,
855 unsigned long address
,
858 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
860 spin_lock(&kvm
->mmu_lock
);
861 kvm
->mmu_notifier_seq
++;
862 kvm_set_spte_hva(kvm
, address
, pte
);
863 spin_unlock(&kvm
->mmu_lock
);
866 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier
*mn
,
867 struct mm_struct
*mm
,
871 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
872 int need_tlb_flush
= 0;
874 spin_lock(&kvm
->mmu_lock
);
876 * The count increase must become visible at unlock time as no
877 * spte can be established without taking the mmu_lock and
878 * count is also read inside the mmu_lock critical section.
880 kvm
->mmu_notifier_count
++;
881 for (; start
< end
; start
+= PAGE_SIZE
)
882 need_tlb_flush
|= kvm_unmap_hva(kvm
, start
);
883 spin_unlock(&kvm
->mmu_lock
);
885 /* we've to flush the tlb before the pages can be freed */
887 kvm_flush_remote_tlbs(kvm
);
890 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier
*mn
,
891 struct mm_struct
*mm
,
895 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
916 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier
*mn
,
917 struct mm_struct
*mm
,
918 unsigned long address
)
920 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
923 spin_lock(&kvm
->mmu_lock
);
924 young
= kvm_age_hva(kvm
, address
);
925 spin_unlock(&kvm
->mmu_lock
);
928 kvm_flush_remote_tlbs(kvm
);
933 static void kvm_mmu_notifier_release(struct mmu_notifier
*mn
,
934 struct mm_struct
*mm
)
936 struct kvm
*kvm
= mmu_notifier_to_kvm(mn
);
937 kvm_arch_flush_shadow(kvm
);
940 static const struct mmu_notifier_ops kvm_mmu_notifier_ops
= {
941 .invalidate_page
= kvm_mmu_notifier_invalidate_page
,
942 .invalidate_range_start
= kvm_mmu_notifier_invalidate_range_start
,
943 .invalidate_range_end
= kvm_mmu_notifier_invalidate_range_end
,
944 .clear_flush_young
= kvm_mmu_notifier_clear_flush_young
,
945 .change_pte
= kvm_mmu_notifier_change_pte
,
946 .release
= kvm_mmu_notifier_release
,
948 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
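/*
 * Illustrative sketch (not code from this file) of how a page-fault path is
 * expected to consume mmu_notifier_seq/mmu_notifier_count: sample the
 * sequence outside mmu_lock, do the (possibly sleeping) gfn_to_pfn() lookup,
 * then re-check under mmu_lock and retry if an invalidation ran in between.
 * Names such as "mmu_seq" and the retry label are only for illustration.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count ||
 *	    kvm->mmu_notifier_seq != mmu_seq)
 *		goto retry;			// drop pfn and start over
 *	... install the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */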
950 static struct kvm
*kvm_create_vm(void)
952 struct kvm
*kvm
= kvm_arch_create_vm();
953 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
959 #ifdef CONFIG_HAVE_KVM_IRQCHIP
960 INIT_LIST_HEAD(&kvm
->irq_routing
);
961 INIT_HLIST_HEAD(&kvm
->mask_notifier_list
);
964 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
965 page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
);
968 return ERR_PTR(-ENOMEM
);
970 kvm
->coalesced_mmio_ring
=
971 (struct kvm_coalesced_mmio_ring
*)page_address(page
);
974 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
977 kvm
->mmu_notifier
.ops
= &kvm_mmu_notifier_ops
;
978 err
= mmu_notifier_register(&kvm
->mmu_notifier
, current
->mm
);
980 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
989 kvm
->mm
= current
->mm
;
990 atomic_inc(&kvm
->mm
->mm_count
);
991 spin_lock_init(&kvm
->mmu_lock
);
992 spin_lock_init(&kvm
->requests_lock
);
993 kvm_io_bus_init(&kvm
->pio_bus
);
994 kvm_eventfd_init(kvm
);
995 mutex_init(&kvm
->lock
);
996 mutex_init(&kvm
->irq_lock
);
997 kvm_io_bus_init(&kvm
->mmio_bus
);
998 init_rwsem(&kvm
->slots_lock
);
999 atomic_set(&kvm
->users_count
, 1);
1000 spin_lock(&kvm_lock
);
1001 list_add(&kvm
->vm_list
, &vm_list
);
1002 spin_unlock(&kvm_lock
);
1003 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1004 kvm_coalesced_mmio_init(kvm
);
1011 * Free any memory in @free but not in @dont.
1013 static void kvm_free_physmem_slot(struct kvm_memory_slot
*free
,
1014 struct kvm_memory_slot
*dont
)
1018 if (!dont
|| free
->rmap
!= dont
->rmap
)
1021 if (!dont
|| free
->dirty_bitmap
!= dont
->dirty_bitmap
)
1022 vfree(free
->dirty_bitmap
);
1025 for (i
= 0; i
< KVM_NR_PAGE_SIZES
- 1; ++i
) {
1026 if (!dont
|| free
->lpage_info
[i
] != dont
->lpage_info
[i
]) {
1027 vfree(free
->lpage_info
[i
]);
1028 free
->lpage_info
[i
] = NULL
;
1033 free
->dirty_bitmap
= NULL
;
1037 void kvm_free_physmem(struct kvm
*kvm
)
1041 for (i
= 0; i
< kvm
->nmemslots
; ++i
)
1042 kvm_free_physmem_slot(&kvm
->memslots
[i
], NULL
);
1045 static void kvm_destroy_vm(struct kvm
*kvm
)
1047 struct mm_struct
*mm
= kvm
->mm
;
1049 kvm_arch_sync_events(kvm
);
1050 spin_lock(&kvm_lock
);
1051 list_del(&kvm
->vm_list
);
1052 spin_unlock(&kvm_lock
);
1053 kvm_free_irq_routing(kvm
);
1054 kvm_io_bus_destroy(&kvm
->pio_bus
);
1055 kvm_io_bus_destroy(&kvm
->mmio_bus
);
1056 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1057 if (kvm
->coalesced_mmio_ring
!= NULL
)
1058 free_page((unsigned long)kvm
->coalesced_mmio_ring
);
1060 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1061 mmu_notifier_unregister(&kvm
->mmu_notifier
, kvm
->mm
);
1063 kvm_arch_flush_shadow(kvm
);
1065 kvm_arch_destroy_vm(kvm
);
1069 void kvm_get_kvm(struct kvm
*kvm
)
1071 atomic_inc(&kvm
->users_count
);
1073 EXPORT_SYMBOL_GPL(kvm_get_kvm
);
1075 void kvm_put_kvm(struct kvm
*kvm
)
1077 if (atomic_dec_and_test(&kvm
->users_count
))
1078 kvm_destroy_vm(kvm
);
1080 EXPORT_SYMBOL_GPL(kvm_put_kvm
);
1083 static int kvm_vm_release(struct inode
*inode
, struct file
*filp
)
1085 struct kvm
*kvm
= filp
->private_data
;
1087 kvm_irqfd_release(kvm
);
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
;
1110 struct kvm_memory_slot old
, new;
1113 /* General sanity checks */
1114 if (mem
->memory_size
& (PAGE_SIZE
- 1))
1116 if (mem
->guest_phys_addr
& (PAGE_SIZE
- 1))
1118 if (user_alloc
&& (mem
->userspace_addr
& (PAGE_SIZE
- 1)))
1120 if (mem
->slot
>= KVM_MEMORY_SLOTS
+ KVM_PRIVATE_MEM_SLOTS
)
1122 if (mem
->guest_phys_addr
+ mem
->memory_size
< mem
->guest_phys_addr
)
1125 memslot
= &kvm
->memslots
[mem
->slot
];
1126 base_gfn
= mem
->guest_phys_addr
>> PAGE_SHIFT
;
1127 npages
= mem
->memory_size
>> PAGE_SHIFT
;
1130 mem
->flags
&= ~KVM_MEM_LOG_DIRTY_PAGES
;
1132 new = old
= *memslot
;
1134 new.base_gfn
= base_gfn
;
1135 new.npages
= npages
;
1136 new.flags
= mem
->flags
;
1138 /* Disallow changing a memory slot's size. */
1140 if (npages
&& old
.npages
&& npages
!= old
.npages
)
1143 /* Check for overlaps */
1145 for (i
= 0; i
< KVM_MEMORY_SLOTS
; ++i
) {
1146 struct kvm_memory_slot
*s
= &kvm
->memslots
[i
];
1148 if (s
== memslot
|| !s
->npages
)
1150 if (!((base_gfn
+ npages
<= s
->base_gfn
) ||
1151 (base_gfn
>= s
->base_gfn
+ s
->npages
)))
1155 /* Free page dirty bitmap if unneeded */
1156 if (!(new.flags
& KVM_MEM_LOG_DIRTY_PAGES
))
1157 new.dirty_bitmap
= NULL
;
1161 /* Allocate if a slot is being created */
1163 if (npages
&& !new.rmap
) {
1164 new.rmap
= vmalloc(npages
* sizeof(struct page
*));
1169 memset(new.rmap
, 0, npages
* sizeof(*new.rmap
));
1171 new.user_alloc
= user_alloc
;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
1185 for (i
= 0; i
< KVM_NR_PAGE_SIZES
- 1; ++i
) {
1191 /* Avoid unused variable warning if no large pages */
1194 if (new.lpage_info
[i
])
1197 lpages
= 1 + (base_gfn
+ npages
- 1) /
1198 KVM_PAGES_PER_HPAGE(level
);
1199 lpages
-= base_gfn
/ KVM_PAGES_PER_HPAGE(level
);
1201 new.lpage_info
[i
] = vmalloc(lpages
* sizeof(*new.lpage_info
[i
]));
1203 if (!new.lpage_info
[i
])
1206 memset(new.lpage_info
[i
], 0,
1207 lpages
* sizeof(*new.lpage_info
[i
]));
1209 if (base_gfn
% KVM_PAGES_PER_HPAGE(level
))
1210 new.lpage_info
[i
][0].write_count
= 1;
1211 if ((base_gfn
+npages
) % KVM_PAGES_PER_HPAGE(level
))
1212 new.lpage_info
[i
][lpages
- 1].write_count
= 1;
1213 ugfn
= new.userspace_addr
>> PAGE_SHIFT
;
1215 * If the gfn and userspace address are not aligned wrt each
1216 * other, or if explicitly asked to, disable large page
1217 * support for this slot
1219 if ((base_gfn
^ ugfn
) & (KVM_PAGES_PER_HPAGE(level
) - 1) ||
1220 !largepages_enabled
)
1221 for (j
= 0; j
< lpages
; ++j
)
1222 new.lpage_info
[i
][j
].write_count
= 1;
1227 /* Allocate page dirty bitmap if needed */
1228 if ((new.flags
& KVM_MEM_LOG_DIRTY_PAGES
) && !new.dirty_bitmap
) {
1229 unsigned dirty_bytes
= ALIGN(npages
, BITS_PER_LONG
) / 8;
1231 new.dirty_bitmap
= vmalloc(dirty_bytes
);
1232 if (!new.dirty_bitmap
)
1234 memset(new.dirty_bitmap
, 0, dirty_bytes
);
1236 kvm_arch_flush_shadow(kvm
);
1238 #else /* not defined CONFIG_S390 */
1239 new.user_alloc
= user_alloc
;
1241 new.userspace_addr
= mem
->userspace_addr
;
1242 #endif /* not defined CONFIG_S390 */
1245 kvm_arch_flush_shadow(kvm
);
1247 spin_lock(&kvm
->mmu_lock
);
1248 if (mem
->slot
>= kvm
->nmemslots
)
1249 kvm
->nmemslots
= mem
->slot
+ 1;
1252 spin_unlock(&kvm
->mmu_lock
);
1254 r
= kvm_arch_set_memory_region(kvm
, mem
, old
, user_alloc
);
1256 spin_lock(&kvm
->mmu_lock
);
1258 spin_unlock(&kvm
->mmu_lock
);
1262 kvm_free_physmem_slot(&old
, npages
? &new : NULL
);
1263 /* Slot deletion case: we have to update the current slot */
1264 spin_lock(&kvm
->mmu_lock
);
1267 spin_unlock(&kvm
->mmu_lock
);
1269 /* map the pages in iommu page table */
1270 r
= kvm_iommu_map_pages(kvm
, base_gfn
, npages
);
1277 kvm_free_physmem_slot(&new, &old
);
1282 EXPORT_SYMBOL_GPL(__kvm_set_memory_region
);
1284 int kvm_set_memory_region(struct kvm
*kvm
,
1285 struct kvm_userspace_memory_region
*mem
,
1290 down_write(&kvm
->slots_lock
);
1291 r
= __kvm_set_memory_region(kvm
, mem
, user_alloc
);
1292 up_write(&kvm
->slots_lock
);
1295 EXPORT_SYMBOL_GPL(kvm_set_memory_region
);
1297 int kvm_vm_ioctl_set_memory_region(struct kvm
*kvm
,
1299 kvm_userspace_memory_region
*mem
,
1302 if (mem
->slot
>= KVM_MEMORY_SLOTS
)
1304 return kvm_set_memory_region(kvm
, mem
, user_alloc
);
1307 int kvm_get_dirty_log(struct kvm
*kvm
,
1308 struct kvm_dirty_log
*log
, int *is_dirty
)
1310 struct kvm_memory_slot
*memslot
;
1313 unsigned long any
= 0;
1316 if (log
->slot
>= KVM_MEMORY_SLOTS
)
1319 memslot
= &kvm
->memslots
[log
->slot
];
1321 if (!memslot
->dirty_bitmap
)
1324 n
= ALIGN(memslot
->npages
, BITS_PER_LONG
) / 8;
1326 for (i
= 0; !any
&& i
< n
/sizeof(long); ++i
)
1327 any
= memslot
->dirty_bitmap
[i
];
1330 if (copy_to_user(log
->dirty_bitmap
, memslot
->dirty_bitmap
, n
))
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
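/*
 * Typical caller pattern for the error-page helpers above (sketch only, the
 * error handling shown is an example):
 *
 *	page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return -EFAULT;
 *	}
 */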
1370 struct kvm_memory_slot
*gfn_to_memslot_unaliased(struct kvm
*kvm
, gfn_t gfn
)
1374 for (i
= 0; i
< kvm
->nmemslots
; ++i
) {
1375 struct kvm_memory_slot
*memslot
= &kvm
->memslots
[i
];
1377 if (gfn
>= memslot
->base_gfn
1378 && gfn
< memslot
->base_gfn
+ memslot
->npages
)
1383 EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased
);
1385 struct kvm_memory_slot
*gfn_to_memslot(struct kvm
*kvm
, gfn_t gfn
)
1387 gfn
= unalias_gfn(kvm
, gfn
);
1388 return gfn_to_memslot_unaliased(kvm
, gfn
);
1391 int kvm_is_visible_gfn(struct kvm
*kvm
, gfn_t gfn
)
1395 gfn
= unalias_gfn(kvm
, gfn
);
1396 for (i
= 0; i
< KVM_MEMORY_SLOTS
; ++i
) {
1397 struct kvm_memory_slot
*memslot
= &kvm
->memslots
[i
];
1399 if (gfn
>= memslot
->base_gfn
1400 && gfn
< memslot
->base_gfn
+ memslot
->npages
)
1405 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn
);
1407 unsigned long gfn_to_hva(struct kvm
*kvm
, gfn_t gfn
)
1409 struct kvm_memory_slot
*slot
;
1411 gfn
= unalias_gfn(kvm
, gfn
);
1412 slot
= gfn_to_memslot_unaliased(kvm
, gfn
);
1415 return (slot
->userspace_addr
+ (gfn
- slot
->base_gfn
) * PAGE_SIZE
);
1417 EXPORT_SYMBOL_GPL(gfn_to_hva
);
1419 pfn_t
gfn_to_pfn(struct kvm
*kvm
, gfn_t gfn
)
1421 struct page
*page
[1];
1428 addr
= gfn_to_hva(kvm
, gfn
);
1429 if (kvm_is_error_hva(addr
)) {
1431 return page_to_pfn(bad_page
);
1434 npages
= get_user_pages_fast(addr
, 1, 1, page
);
1436 if (unlikely(npages
!= 1)) {
1437 struct vm_area_struct
*vma
;
1439 down_read(¤t
->mm
->mmap_sem
);
1440 vma
= find_vma(current
->mm
, addr
);
1442 if (vma
== NULL
|| addr
< vma
->vm_start
||
1443 !(vma
->vm_flags
& VM_PFNMAP
)) {
1444 up_read(¤t
->mm
->mmap_sem
);
1446 return page_to_pfn(bad_page
);
1449 pfn
= ((addr
- vma
->vm_start
) >> PAGE_SHIFT
) + vma
->vm_pgoff
;
1450 up_read(¤t
->mm
->mmap_sem
);
1451 BUG_ON(!kvm_is_mmio_pfn(pfn
));
1453 pfn
= page_to_pfn(page
[0]);
1458 EXPORT_SYMBOL_GPL(gfn_to_pfn
);
1460 struct page
*gfn_to_page(struct kvm
*kvm
, gfn_t gfn
)
1464 pfn
= gfn_to_pfn(kvm
, gfn
);
1465 if (!kvm_is_mmio_pfn(pfn
))
1466 return pfn_to_page(pfn
);
1468 WARN_ON(kvm_is_mmio_pfn(pfn
));
1474 EXPORT_SYMBOL_GPL(gfn_to_page
);
1476 void kvm_release_page_clean(struct page
*page
)
1478 kvm_release_pfn_clean(page_to_pfn(page
));
1480 EXPORT_SYMBOL_GPL(kvm_release_page_clean
);
1482 void kvm_release_pfn_clean(pfn_t pfn
)
1484 if (!kvm_is_mmio_pfn(pfn
))
1485 put_page(pfn_to_page(pfn
));
1487 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean
);
1489 void kvm_release_page_dirty(struct page
*page
)
1491 kvm_release_pfn_dirty(page_to_pfn(page
));
1493 EXPORT_SYMBOL_GPL(kvm_release_page_dirty
);
1495 void kvm_release_pfn_dirty(pfn_t pfn
)
1497 kvm_set_pfn_dirty(pfn
);
1498 kvm_release_pfn_clean(pfn
);
1500 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty
);
1502 void kvm_set_page_dirty(struct page
*page
)
1504 kvm_set_pfn_dirty(page_to_pfn(page
));
1506 EXPORT_SYMBOL_GPL(kvm_set_page_dirty
);
1508 void kvm_set_pfn_dirty(pfn_t pfn
)
1510 if (!kvm_is_mmio_pfn(pfn
)) {
1511 struct page
*page
= pfn_to_page(pfn
);
1512 if (!PageReserved(page
))
1516 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty
);
1518 void kvm_set_pfn_accessed(pfn_t pfn
)
1520 if (!kvm_is_mmio_pfn(pfn
))
1521 mark_page_accessed(pfn_to_page(pfn
));
1523 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed
);
1525 void kvm_get_pfn(pfn_t pfn
)
1527 if (!kvm_is_mmio_pfn(pfn
))
1528 get_page(pfn_to_page(pfn
));
1530 EXPORT_SYMBOL_GPL(kvm_get_pfn
);
1532 static int next_segment(unsigned long len
, int offset
)
1534 if (len
> PAGE_SIZE
- offset
)
1535 return PAGE_SIZE
- offset
;
1540 int kvm_read_guest_page(struct kvm
*kvm
, gfn_t gfn
, void *data
, int offset
,
1546 addr
= gfn_to_hva(kvm
, gfn
);
1547 if (kvm_is_error_hva(addr
))
1549 r
= copy_from_user(data
, (void __user
*)addr
+ offset
, len
);
1554 EXPORT_SYMBOL_GPL(kvm_read_guest_page
);
1556 int kvm_read_guest(struct kvm
*kvm
, gpa_t gpa
, void *data
, unsigned long len
)
1558 gfn_t gfn
= gpa
>> PAGE_SHIFT
;
1560 int offset
= offset_in_page(gpa
);
1563 while ((seg
= next_segment(len
, offset
)) != 0) {
1564 ret
= kvm_read_guest_page(kvm
, gfn
, data
, offset
, seg
);
1574 EXPORT_SYMBOL_GPL(kvm_read_guest
);
1576 int kvm_read_guest_atomic(struct kvm
*kvm
, gpa_t gpa
, void *data
,
1581 gfn_t gfn
= gpa
>> PAGE_SHIFT
;
1582 int offset
= offset_in_page(gpa
);
1584 addr
= gfn_to_hva(kvm
, gfn
);
1585 if (kvm_is_error_hva(addr
))
1587 pagefault_disable();
1588 r
= __copy_from_user_inatomic(data
, (void __user
*)addr
+ offset
, len
);
1594 EXPORT_SYMBOL(kvm_read_guest_atomic
);
1596 int kvm_write_guest_page(struct kvm
*kvm
, gfn_t gfn
, const void *data
,
1597 int offset
, int len
)
1602 addr
= gfn_to_hva(kvm
, gfn
);
1603 if (kvm_is_error_hva(addr
))
1605 r
= copy_to_user((void __user
*)addr
+ offset
, data
, len
);
1608 mark_page_dirty(kvm
, gfn
);
1611 EXPORT_SYMBOL_GPL(kvm_write_guest_page
);
1613 int kvm_write_guest(struct kvm
*kvm
, gpa_t gpa
, const void *data
,
1616 gfn_t gfn
= gpa
>> PAGE_SHIFT
;
1618 int offset
= offset_in_page(gpa
);
1621 while ((seg
= next_segment(len
, offset
)) != 0) {
1622 ret
= kvm_write_guest_page(kvm
, gfn
, data
, offset
, seg
);
1633 int kvm_clear_guest_page(struct kvm
*kvm
, gfn_t gfn
, int offset
, int len
)
1635 return kvm_write_guest_page(kvm
, gfn
, empty_zero_page
, offset
, len
);
1637 EXPORT_SYMBOL_GPL(kvm_clear_guest_page
);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
1658 void mark_page_dirty(struct kvm
*kvm
, gfn_t gfn
)
1660 struct kvm_memory_slot
*memslot
;
1662 gfn
= unalias_gfn(kvm
, gfn
);
1663 memslot
= gfn_to_memslot_unaliased(kvm
, gfn
);
1664 if (memslot
&& memslot
->dirty_bitmap
) {
1665 unsigned long rel_gfn
= gfn
- memslot
->base_gfn
;
1668 if (!test_bit(rel_gfn
, memslot
->dirty_bitmap
))
1669 set_bit(rel_gfn
, memslot
->dirty_bitmap
);
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
1708 static int kvm_vcpu_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1710 struct kvm_vcpu
*vcpu
= vma
->vm_file
->private_data
;
1713 if (vmf
->pgoff
== 0)
1714 page
= virt_to_page(vcpu
->run
);
1716 else if (vmf
->pgoff
== KVM_PIO_PAGE_OFFSET
)
1717 page
= virt_to_page(vcpu
->arch
.pio_data
);
1719 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1720 else if (vmf
->pgoff
== KVM_COALESCED_MMIO_PAGE_OFFSET
)
1721 page
= virt_to_page(vcpu
->kvm
->coalesced_mmio_ring
);
1724 return VM_FAULT_SIGBUS
;
1730 static const struct vm_operations_struct kvm_vcpu_vm_ops
= {
1731 .fault
= kvm_vcpu_fault
,
1734 static int kvm_vcpu_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1736 vma
->vm_ops
= &kvm_vcpu_vm_ops
;
1740 static int kvm_vcpu_release(struct inode
*inode
, struct file
*filp
)
1742 struct kvm_vcpu
*vcpu
= filp
->private_data
;
1744 kvm_put_kvm(vcpu
->kvm
);
1748 static struct file_operations kvm_vcpu_fops
= {
1749 .release
= kvm_vcpu_release
,
1750 .unlocked_ioctl
= kvm_vcpu_ioctl
,
1751 .compat_ioctl
= kvm_vcpu_ioctl
,
1752 .mmap
= kvm_vcpu_mmap
,
1756 * Allocates an inode for the vcpu.
1758 static int create_vcpu_fd(struct kvm_vcpu
*vcpu
)
1760 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops
, vcpu
, 0);
1764 * Creates some virtual cpus. Good luck creating more than one.
1766 static int kvm_vm_ioctl_create_vcpu(struct kvm
*kvm
, u32 id
)
1769 struct kvm_vcpu
*vcpu
, *v
;
1771 vcpu
= kvm_arch_vcpu_create(kvm
, id
);
1773 return PTR_ERR(vcpu
);
1775 preempt_notifier_init(&vcpu
->preempt_notifier
, &kvm_preempt_ops
);
1777 r
= kvm_arch_vcpu_setup(vcpu
);
1781 mutex_lock(&kvm
->lock
);
1782 if (atomic_read(&kvm
->online_vcpus
) == KVM_MAX_VCPUS
) {
1787 kvm_for_each_vcpu(r
, v
, kvm
)
1788 if (v
->vcpu_id
== id
) {
1793 BUG_ON(kvm
->vcpus
[atomic_read(&kvm
->online_vcpus
)]);
1795 /* Now it's all set up, let userspace reach it */
1797 r
= create_vcpu_fd(vcpu
);
1803 kvm
->vcpus
[atomic_read(&kvm
->online_vcpus
)] = vcpu
;
1805 atomic_inc(&kvm
->online_vcpus
);
1807 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1808 if (kvm
->bsp_vcpu_id
== id
)
1809 kvm
->bsp_vcpu
= vcpu
;
1811 mutex_unlock(&kvm
->lock
);
1815 mutex_unlock(&kvm
->lock
);
1816 kvm_arch_vcpu_destroy(vcpu
);
1820 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu
*vcpu
, sigset_t
*sigset
)
1823 sigdelsetmask(sigset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1824 vcpu
->sigset_active
= 1;
1825 vcpu
->sigset
= *sigset
;
1827 vcpu
->sigset_active
= 0;
1831 #ifdef __KVM_HAVE_MSIX
1832 static int kvm_vm_ioctl_set_msix_nr(struct kvm
*kvm
,
1833 struct kvm_assigned_msix_nr
*entry_nr
)
1836 struct kvm_assigned_dev_kernel
*adev
;
1838 mutex_lock(&kvm
->lock
);
1840 adev
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
1841 entry_nr
->assigned_dev_id
);
1847 if (adev
->entries_nr
== 0) {
1848 adev
->entries_nr
= entry_nr
->entry_nr
;
1849 if (adev
->entries_nr
== 0 ||
1850 adev
->entries_nr
>= KVM_MAX_MSIX_PER_DEV
) {
1855 adev
->host_msix_entries
= kzalloc(sizeof(struct msix_entry
) *
1858 if (!adev
->host_msix_entries
) {
1862 adev
->guest_msix_entries
= kzalloc(
1863 sizeof(struct kvm_guest_msix_entry
) *
1864 entry_nr
->entry_nr
, GFP_KERNEL
);
1865 if (!adev
->guest_msix_entries
) {
1866 kfree(adev
->host_msix_entries
);
	} else /* Not allowed to set the MSI-X entry count twice */
1873 mutex_unlock(&kvm
->lock
);
1877 static int kvm_vm_ioctl_set_msix_entry(struct kvm
*kvm
,
1878 struct kvm_assigned_msix_entry
*entry
)
1881 struct kvm_assigned_dev_kernel
*adev
;
1883 mutex_lock(&kvm
->lock
);
1885 adev
= kvm_find_assigned_dev(&kvm
->arch
.assigned_dev_head
,
1886 entry
->assigned_dev_id
);
1890 goto msix_entry_out
;
1893 for (i
= 0; i
< adev
->entries_nr
; i
++)
1894 if (adev
->guest_msix_entries
[i
].vector
== 0 ||
1895 adev
->guest_msix_entries
[i
].entry
== entry
->entry
) {
1896 adev
->guest_msix_entries
[i
].entry
= entry
->entry
;
1897 adev
->guest_msix_entries
[i
].vector
= entry
->gsi
;
1898 adev
->host_msix_entries
[i
].entry
= entry
->entry
;
1901 if (i
== adev
->entries_nr
) {
1903 goto msix_entry_out
;
1907 mutex_unlock(&kvm
->lock
);
1913 static long kvm_vcpu_ioctl(struct file
*filp
,
1914 unsigned int ioctl
, unsigned long arg
)
1916 struct kvm_vcpu
*vcpu
= filp
->private_data
;
1917 void __user
*argp
= (void __user
*)arg
;
1919 struct kvm_fpu
*fpu
= NULL
;
1920 struct kvm_sregs
*kvm_sregs
= NULL
;
1922 if (vcpu
->kvm
->mm
!= current
->mm
)
1929 r
= kvm_arch_vcpu_ioctl_run(vcpu
, vcpu
->run
);
1931 case KVM_GET_REGS
: {
1932 struct kvm_regs
*kvm_regs
;
1935 kvm_regs
= kzalloc(sizeof(struct kvm_regs
), GFP_KERNEL
);
1938 r
= kvm_arch_vcpu_ioctl_get_regs(vcpu
, kvm_regs
);
1942 if (copy_to_user(argp
, kvm_regs
, sizeof(struct kvm_regs
)))
1949 case KVM_SET_REGS
: {
1950 struct kvm_regs
*kvm_regs
;
1953 kvm_regs
= kzalloc(sizeof(struct kvm_regs
), GFP_KERNEL
);
1957 if (copy_from_user(kvm_regs
, argp
, sizeof(struct kvm_regs
)))
1959 r
= kvm_arch_vcpu_ioctl_set_regs(vcpu
, kvm_regs
);
1967 case KVM_GET_SREGS
: {
1968 kvm_sregs
= kzalloc(sizeof(struct kvm_sregs
), GFP_KERNEL
);
1972 r
= kvm_arch_vcpu_ioctl_get_sregs(vcpu
, kvm_sregs
);
1976 if (copy_to_user(argp
, kvm_sregs
, sizeof(struct kvm_sregs
)))
1981 case KVM_SET_SREGS
: {
1982 kvm_sregs
= kmalloc(sizeof(struct kvm_sregs
), GFP_KERNEL
);
1987 if (copy_from_user(kvm_sregs
, argp
, sizeof(struct kvm_sregs
)))
1989 r
= kvm_arch_vcpu_ioctl_set_sregs(vcpu
, kvm_sregs
);
1995 case KVM_GET_MP_STATE
: {
1996 struct kvm_mp_state mp_state
;
1998 r
= kvm_arch_vcpu_ioctl_get_mpstate(vcpu
, &mp_state
);
2002 if (copy_to_user(argp
, &mp_state
, sizeof mp_state
))
2007 case KVM_SET_MP_STATE
: {
2008 struct kvm_mp_state mp_state
;
2011 if (copy_from_user(&mp_state
, argp
, sizeof mp_state
))
2013 r
= kvm_arch_vcpu_ioctl_set_mpstate(vcpu
, &mp_state
);
2019 case KVM_TRANSLATE
: {
2020 struct kvm_translation tr
;
2023 if (copy_from_user(&tr
, argp
, sizeof tr
))
2025 r
= kvm_arch_vcpu_ioctl_translate(vcpu
, &tr
);
2029 if (copy_to_user(argp
, &tr
, sizeof tr
))
2034 case KVM_SET_GUEST_DEBUG
: {
2035 struct kvm_guest_debug dbg
;
2038 if (copy_from_user(&dbg
, argp
, sizeof dbg
))
2040 r
= kvm_arch_vcpu_ioctl_set_guest_debug(vcpu
, &dbg
);
2046 case KVM_SET_SIGNAL_MASK
: {
2047 struct kvm_signal_mask __user
*sigmask_arg
= argp
;
2048 struct kvm_signal_mask kvm_sigmask
;
2049 sigset_t sigset
, *p
;
2054 if (copy_from_user(&kvm_sigmask
, argp
,
2055 sizeof kvm_sigmask
))
2058 if (kvm_sigmask
.len
!= sizeof sigset
)
2061 if (copy_from_user(&sigset
, sigmask_arg
->sigset
,
2066 r
= kvm_vcpu_ioctl_set_sigmask(vcpu
, &sigset
);
2070 fpu
= kzalloc(sizeof(struct kvm_fpu
), GFP_KERNEL
);
2074 r
= kvm_arch_vcpu_ioctl_get_fpu(vcpu
, fpu
);
2078 if (copy_to_user(argp
, fpu
, sizeof(struct kvm_fpu
)))
2084 fpu
= kmalloc(sizeof(struct kvm_fpu
), GFP_KERNEL
);
2089 if (copy_from_user(fpu
, argp
, sizeof(struct kvm_fpu
)))
2091 r
= kvm_arch_vcpu_ioctl_set_fpu(vcpu
, fpu
);
2098 r
= kvm_arch_vcpu_ioctl(filp
, ioctl
, arg
);
2106 static long kvm_vm_ioctl(struct file
*filp
,
2107 unsigned int ioctl
, unsigned long arg
)
2109 struct kvm
*kvm
= filp
->private_data
;
2110 void __user
*argp
= (void __user
*)arg
;
2113 if (kvm
->mm
!= current
->mm
)
2116 case KVM_CREATE_VCPU
:
2117 r
= kvm_vm_ioctl_create_vcpu(kvm
, arg
);
2121 case KVM_SET_USER_MEMORY_REGION
: {
2122 struct kvm_userspace_memory_region kvm_userspace_mem
;
2125 if (copy_from_user(&kvm_userspace_mem
, argp
,
2126 sizeof kvm_userspace_mem
))
2129 r
= kvm_vm_ioctl_set_memory_region(kvm
, &kvm_userspace_mem
, 1);
2134 case KVM_GET_DIRTY_LOG
: {
2135 struct kvm_dirty_log log
;
2138 if (copy_from_user(&log
, argp
, sizeof log
))
2140 r
= kvm_vm_ioctl_get_dirty_log(kvm
, &log
);
2145 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2146 case KVM_REGISTER_COALESCED_MMIO
: {
2147 struct kvm_coalesced_mmio_zone zone
;
2149 if (copy_from_user(&zone
, argp
, sizeof zone
))
2152 r
= kvm_vm_ioctl_register_coalesced_mmio(kvm
, &zone
);
2158 case KVM_UNREGISTER_COALESCED_MMIO
: {
2159 struct kvm_coalesced_mmio_zone zone
;
2161 if (copy_from_user(&zone
, argp
, sizeof zone
))
2164 r
= kvm_vm_ioctl_unregister_coalesced_mmio(kvm
, &zone
);
2171 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
2172 case KVM_ASSIGN_PCI_DEVICE
: {
2173 struct kvm_assigned_pci_dev assigned_dev
;
2176 if (copy_from_user(&assigned_dev
, argp
, sizeof assigned_dev
))
2178 r
= kvm_vm_ioctl_assign_device(kvm
, &assigned_dev
);
2183 case KVM_ASSIGN_IRQ
: {
2187 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
2188 case KVM_ASSIGN_DEV_IRQ
: {
2189 struct kvm_assigned_irq assigned_irq
;
2192 if (copy_from_user(&assigned_irq
, argp
, sizeof assigned_irq
))
2194 r
= kvm_vm_ioctl_assign_irq(kvm
, &assigned_irq
);
2199 case KVM_DEASSIGN_DEV_IRQ
: {
2200 struct kvm_assigned_irq assigned_irq
;
2203 if (copy_from_user(&assigned_irq
, argp
, sizeof assigned_irq
))
2205 r
= kvm_vm_ioctl_deassign_dev_irq(kvm
, &assigned_irq
);
2212 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
2213 case KVM_DEASSIGN_PCI_DEVICE
: {
2214 struct kvm_assigned_pci_dev assigned_dev
;
2217 if (copy_from_user(&assigned_dev
, argp
, sizeof assigned_dev
))
2219 r
= kvm_vm_ioctl_deassign_device(kvm
, &assigned_dev
);
2225 #ifdef KVM_CAP_IRQ_ROUTING
2226 case KVM_SET_GSI_ROUTING
: {
2227 struct kvm_irq_routing routing
;
2228 struct kvm_irq_routing __user
*urouting
;
2229 struct kvm_irq_routing_entry
*entries
;
2232 if (copy_from_user(&routing
, argp
, sizeof(routing
)))
2235 if (routing
.nr
>= KVM_MAX_IRQ_ROUTES
)
2240 entries
= vmalloc(routing
.nr
* sizeof(*entries
));
2245 if (copy_from_user(entries
, urouting
->entries
,
2246 routing
.nr
* sizeof(*entries
)))
2247 goto out_free_irq_routing
;
2248 r
= kvm_set_irq_routing(kvm
, entries
, routing
.nr
,
2250 out_free_irq_routing
:
2254 #endif /* KVM_CAP_IRQ_ROUTING */
2255 #ifdef __KVM_HAVE_MSIX
2256 case KVM_ASSIGN_SET_MSIX_NR
: {
2257 struct kvm_assigned_msix_nr entry_nr
;
2259 if (copy_from_user(&entry_nr
, argp
, sizeof entry_nr
))
2261 r
= kvm_vm_ioctl_set_msix_nr(kvm
, &entry_nr
);
2266 case KVM_ASSIGN_SET_MSIX_ENTRY
: {
2267 struct kvm_assigned_msix_entry entry
;
2269 if (copy_from_user(&entry
, argp
, sizeof entry
))
2271 r
= kvm_vm_ioctl_set_msix_entry(kvm
, &entry
);
2278 struct kvm_irqfd data
;
2281 if (copy_from_user(&data
, argp
, sizeof data
))
2283 r
= kvm_irqfd(kvm
, data
.fd
, data
.gsi
, data
.flags
);
2286 case KVM_IOEVENTFD
: {
2287 struct kvm_ioeventfd data
;
2290 if (copy_from_user(&data
, argp
, sizeof data
))
2292 r
= kvm_ioeventfd(kvm
, &data
);
2295 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2296 case KVM_SET_BOOT_CPU_ID
:
2298 mutex_lock(&kvm
->lock
);
2299 if (atomic_read(&kvm
->online_vcpus
) != 0)
2302 kvm
->bsp_vcpu_id
= arg
;
2303 mutex_unlock(&kvm
->lock
);
2307 r
= kvm_arch_vm_ioctl(filp
, ioctl
, arg
);
2313 static int kvm_vm_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
2315 struct page
*page
[1];
2318 gfn_t gfn
= vmf
->pgoff
;
2319 struct kvm
*kvm
= vma
->vm_file
->private_data
;
2321 addr
= gfn_to_hva(kvm
, gfn
);
2322 if (kvm_is_error_hva(addr
))
2323 return VM_FAULT_SIGBUS
;
2325 npages
= get_user_pages(current
, current
->mm
, addr
, 1, 1, 0, page
,
2327 if (unlikely(npages
!= 1))
2328 return VM_FAULT_SIGBUS
;
2330 vmf
->page
= page
[0];
2334 static const struct vm_operations_struct kvm_vm_vm_ops
= {
2335 .fault
= kvm_vm_fault
,
2338 static int kvm_vm_mmap(struct file
*file
, struct vm_area_struct
*vma
)
2340 vma
->vm_ops
= &kvm_vm_vm_ops
;
2344 static struct file_operations kvm_vm_fops
= {
2345 .release
= kvm_vm_release
,
2346 .unlocked_ioctl
= kvm_vm_ioctl
,
2347 .compat_ioctl
= kvm_vm_ioctl
,
2348 .mmap
= kvm_vm_mmap
,
2351 static int kvm_dev_ioctl_create_vm(void)
2356 kvm
= kvm_create_vm();
2358 return PTR_ERR(kvm
);
2359 fd
= anon_inode_getfd("kvm-vm", &kvm_vm_fops
, kvm
, 0);
2366 static long kvm_dev_ioctl_check_extension_generic(long arg
)
2369 case KVM_CAP_USER_MEMORY
:
2370 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS
:
2371 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
:
2372 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2373 case KVM_CAP_SET_BOOT_CPU_ID
:
2376 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2377 case KVM_CAP_IRQ_ROUTING
:
2378 return KVM_MAX_IRQ_ROUTES
;
2383 return kvm_dev_ioctl_check_extension(arg
);
2386 static long kvm_dev_ioctl(struct file
*filp
,
2387 unsigned int ioctl
, unsigned long arg
)
2392 case KVM_GET_API_VERSION
:
2396 r
= KVM_API_VERSION
;
2402 r
= kvm_dev_ioctl_create_vm();
2404 case KVM_CHECK_EXTENSION
:
2405 r
= kvm_dev_ioctl_check_extension_generic(arg
);
2407 case KVM_GET_VCPU_MMAP_SIZE
:
2411 r
= PAGE_SIZE
; /* struct kvm_run */
2413 r
+= PAGE_SIZE
; /* pio data page */
2415 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2416 r
+= PAGE_SIZE
; /* coalesced mmio ring page */
2419 case KVM_TRACE_ENABLE
:
2420 case KVM_TRACE_PAUSE
:
2421 case KVM_TRACE_DISABLE
:
2425 return kvm_arch_dev_ioctl(filp
, ioctl
, arg
);
2431 static struct file_operations kvm_chardev_ops
= {
2432 .unlocked_ioctl
= kvm_dev_ioctl
,
2433 .compat_ioctl
= kvm_dev_ioctl
,
2436 static struct miscdevice kvm_dev
= {
2442 static void hardware_enable(void *junk
)
2444 int cpu
= raw_smp_processor_id();
2446 if (cpumask_test_cpu(cpu
, cpus_hardware_enabled
))
2448 cpumask_set_cpu(cpu
, cpus_hardware_enabled
);
2449 kvm_arch_hardware_enable(NULL
);
2452 static void hardware_disable(void *junk
)
2454 int cpu
= raw_smp_processor_id();
2456 if (!cpumask_test_cpu(cpu
, cpus_hardware_enabled
))
2458 cpumask_clear_cpu(cpu
, cpus_hardware_enabled
);
2459 kvm_arch_hardware_disable(NULL
);
2462 static int kvm_cpu_hotplug(struct notifier_block
*notifier
, unsigned long val
,
2467 val
&= ~CPU_TASKS_FROZEN
;
2470 printk(KERN_INFO
"kvm: disabling virtualization on CPU%d\n",
2472 hardware_disable(NULL
);
2474 case CPU_UP_CANCELED
:
2475 printk(KERN_INFO
"kvm: disabling virtualization on CPU%d\n",
2477 smp_call_function_single(cpu
, hardware_disable
, NULL
, 1);
2480 printk(KERN_INFO
"kvm: enabling virtualization on CPU%d\n",
2482 smp_call_function_single(cpu
, hardware_enable
, NULL
, 1);
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
2520 void kvm_io_bus_init(struct kvm_io_bus
*bus
)
2522 memset(bus
, 0, sizeof(*bus
));
2525 void kvm_io_bus_destroy(struct kvm_io_bus
*bus
)
2529 for (i
= 0; i
< bus
->dev_count
; i
++) {
2530 struct kvm_io_device
*pos
= bus
->devs
[i
];
2532 kvm_iodevice_destructor(pos
);
2536 /* kvm_io_bus_write - called under kvm->slots_lock */
2537 int kvm_io_bus_write(struct kvm_io_bus
*bus
, gpa_t addr
,
2538 int len
, const void *val
)
2541 for (i
= 0; i
< bus
->dev_count
; i
++)
2542 if (!kvm_iodevice_write(bus
->devs
[i
], addr
, len
, val
))
2547 /* kvm_io_bus_read - called under kvm->slots_lock */
2548 int kvm_io_bus_read(struct kvm_io_bus
*bus
, gpa_t addr
, int len
, void *val
)
2551 for (i
= 0; i
< bus
->dev_count
; i
++)
2552 if (!kvm_iodevice_read(bus
->devs
[i
], addr
, len
, val
))
2557 int kvm_io_bus_register_dev(struct kvm
*kvm
, struct kvm_io_bus
*bus
,
2558 struct kvm_io_device
*dev
)
2562 down_write(&kvm
->slots_lock
);
2563 ret
= __kvm_io_bus_register_dev(bus
, dev
);
2564 up_write(&kvm
->slots_lock
);
2569 /* An unlocked version. Caller must have write lock on slots_lock. */
2570 int __kvm_io_bus_register_dev(struct kvm_io_bus
*bus
,
2571 struct kvm_io_device
*dev
)
2573 if (bus
->dev_count
> NR_IOBUS_DEVS
-1)
2576 bus
->devs
[bus
->dev_count
++] = dev
;
2581 void kvm_io_bus_unregister_dev(struct kvm
*kvm
,
2582 struct kvm_io_bus
*bus
,
2583 struct kvm_io_device
*dev
)
2585 down_write(&kvm
->slots_lock
);
2586 __kvm_io_bus_unregister_dev(bus
, dev
);
2587 up_write(&kvm
->slots_lock
);
2590 /* An unlocked version. Caller must have write lock on slots_lock. */
2591 void __kvm_io_bus_unregister_dev(struct kvm_io_bus
*bus
,
2592 struct kvm_io_device
*dev
)
2596 for (i
= 0; i
< bus
->dev_count
; i
++)
2597 if (bus
->devs
[i
] == dev
) {
2598 bus
->devs
[i
] = bus
->devs
[--bus
->dev_count
];
2603 static struct notifier_block kvm_cpu_notifier
= {
2604 .notifier_call
= kvm_cpu_hotplug
,
2605 .priority
= 20, /* must be > scheduler priority */
2608 static int vm_stat_get(void *_offset
, u64
*val
)
2610 unsigned offset
= (long)_offset
;
2614 spin_lock(&kvm_lock
);
2615 list_for_each_entry(kvm
, &vm_list
, vm_list
)
2616 *val
+= *(u32
*)((void *)kvm
+ offset
);
2617 spin_unlock(&kvm_lock
);
2621 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops
, vm_stat_get
, NULL
, "%llu\n");
2623 static int vcpu_stat_get(void *_offset
, u64
*val
)
2625 unsigned offset
= (long)_offset
;
2627 struct kvm_vcpu
*vcpu
;
2631 spin_lock(&kvm_lock
);
2632 list_for_each_entry(kvm
, &vm_list
, vm_list
)
2633 kvm_for_each_vcpu(i
, vcpu
, kvm
)
2634 *val
+= *(u32
*)((void *)vcpu
+ offset
);
2636 spin_unlock(&kvm_lock
);
2640 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops
, vcpu_stat_get
, NULL
, "%llu\n");
2642 static const struct file_operations
*stat_fops
[] = {
2643 [KVM_STAT_VCPU
] = &vcpu_stat_fops
,
2644 [KVM_STAT_VM
] = &vm_stat_fops
,
2647 static void kvm_init_debug(void)
2649 struct kvm_stats_debugfs_item
*p
;
2651 kvm_debugfs_dir
= debugfs_create_dir("kvm", NULL
);
2652 for (p
= debugfs_entries
; p
->name
; ++p
)
2653 p
->dentry
= debugfs_create_file(p
->name
, 0444, kvm_debugfs_dir
,
2654 (void *)(long)p
->offset
,
2655 stat_fops
[p
->kind
]);
2658 static void kvm_exit_debug(void)
2660 struct kvm_stats_debugfs_item
*p
;
2662 for (p
= debugfs_entries
; p
->name
; ++p
)
2663 debugfs_remove(p
->dentry
);
2664 debugfs_remove(kvm_debugfs_dir
);
2667 static int kvm_suspend(struct sys_device
*dev
, pm_message_t state
)
2669 hardware_disable(NULL
);
2673 static int kvm_resume(struct sys_device
*dev
)
2675 hardware_enable(NULL
);
2679 static struct sysdev_class kvm_sysdev_class
= {
2681 .suspend
= kvm_suspend
,
2682 .resume
= kvm_resume
,
2685 static struct sys_device kvm_sysdev
= {
2687 .cls
= &kvm_sysdev_class
,
2690 struct page
*bad_page
;
2694 struct kvm_vcpu
*preempt_notifier_to_vcpu(struct preempt_notifier
*pn
)
2696 return container_of(pn
, struct kvm_vcpu
, preempt_notifier
);
2699 static void kvm_sched_in(struct preempt_notifier
*pn
, int cpu
)
2701 struct kvm_vcpu
*vcpu
= preempt_notifier_to_vcpu(pn
);
2703 kvm_arch_vcpu_load(vcpu
, cpu
);
2706 static void kvm_sched_out(struct preempt_notifier
*pn
,
2707 struct task_struct
*next
)
2709 struct kvm_vcpu
*vcpu
= preempt_notifier_to_vcpu(pn
);
2711 kvm_arch_vcpu_put(vcpu
);
2714 int kvm_init(void *opaque
, unsigned int vcpu_size
,
2715 struct module
*module
)
2720 r
= kvm_arch_init(opaque
);
2724 bad_page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
);
2726 if (bad_page
== NULL
) {
2731 bad_pfn
= page_to_pfn(bad_page
);
2733 if (!zalloc_cpumask_var(&cpus_hardware_enabled
, GFP_KERNEL
)) {
2738 r
= kvm_arch_hardware_setup();
2742 for_each_online_cpu(cpu
) {
2743 smp_call_function_single(cpu
,
2744 kvm_arch_check_processor_compat
,
2750 on_each_cpu(hardware_enable
, NULL
, 1);
2751 r
= register_cpu_notifier(&kvm_cpu_notifier
);
2754 register_reboot_notifier(&kvm_reboot_notifier
);
2756 r
= sysdev_class_register(&kvm_sysdev_class
);
2760 r
= sysdev_register(&kvm_sysdev
);
2764 /* A kmem cache lets us meet the alignment requirements of fx_save. */
2765 kvm_vcpu_cache
= kmem_cache_create("kvm_vcpu", vcpu_size
,
2766 __alignof__(struct kvm_vcpu
),
2768 if (!kvm_vcpu_cache
) {
2773 kvm_chardev_ops
.owner
= module
;
2774 kvm_vm_fops
.owner
= module
;
2775 kvm_vcpu_fops
.owner
= module
;
2777 r
= misc_register(&kvm_dev
);
2779 printk(KERN_ERR
"kvm: misc device register failed\n");
2783 kvm_preempt_ops
.sched_in
= kvm_sched_in
;
2784 kvm_preempt_ops
.sched_out
= kvm_sched_out
;
2791 kmem_cache_destroy(kvm_vcpu_cache
);
2793 sysdev_unregister(&kvm_sysdev
);
2795 sysdev_class_unregister(&kvm_sysdev_class
);
2797 unregister_reboot_notifier(&kvm_reboot_notifier
);
2798 unregister_cpu_notifier(&kvm_cpu_notifier
);
2800 on_each_cpu(hardware_disable
, NULL
, 1);
2802 kvm_arch_hardware_unsetup();
2804 free_cpumask_var(cpus_hardware_enabled
);
2806 __free_page(bad_page
);
2812 EXPORT_SYMBOL_GPL(kvm_init
);
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
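/*
 * Illustrative sketch (not part of this file) of how an architecture module
 * is expected to call kvm_init()/kvm_exit() from its module hooks; the
 * names "vmx_init", "kvm_vmx_exit", "vmx_x86_ops" and "struct vcpu_vmx" are
 * placeholders for whatever the arch module actually provides:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 *
 *	static void __exit kvm_vmx_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(vmx_init);
 *	module_exit(kvm_vmx_exit);
 */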