/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int msi2intx = 1;
module_param(msi2intx, bool, 0);

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	/* This is taken to safely inject irq inside the guest. When
	 * the interrupt injection (or the ioapic code) uses a
	 * finer-grained lock, update this.
	 */
	mutex_lock(&assigned_dev->kvm->lock);
	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 1);

	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
		enable_irq(assigned_dev->host_irq);
		assigned_dev->host_irq_disabled = false;
	}
	mutex_unlock(&assigned_dev->kvm->lock);
}
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	schedule_work(&assigned_dev->interrupt_work);

	disable_irq_nosync(irq);
	assigned_dev->host_irq_disabled = true;

	return IRQ_HANDLED;
}
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
}
/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	if (!irqchip_in_kernel(kvm))
		return;

	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;

	if (!assigned_dev->irq_requested_type)
		return;

	/*
	 * In kvm_free_assigned_irq, cancel_work_sync() returns true if:
	 * 1. work is scheduled, and then cancelled.
	 * 2. work callback is executed.
	 *
	 * The first one ensures that the irq is disabled and no more events
	 * will happen. But for the second one, the irq may be enabled (e.g.
	 * for MSI). So we disable the irq here to prevent further events.
	 *
	 * Notice this may result in a nested disable if the interrupt type is
	 * INTx, but that's OK because we are going to free it.
	 *
	 * If this function is a part of VM destroy, please ensure that till
	 * now the kvm state is still legal, for we probably also have to
	 * wait for interrupt_work to complete.
	 */
	disable_irq_nosync(assigned_dev->host_irq);
	cancel_work_sync(&assigned_dev->interrupt_work);

	free_irq(assigned_dev->host_irq, (void *)assigned_dev);

	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		pci_disable_msi(assigned_dev->dev);

	assigned_dev->irq_requested_type = 0;
}
static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}
static int assigned_device_update_intx(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	adev->guest_irq = airq->guest_irq;
	adev->ack_notifier.gsi = airq->guest_irq;

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
		return 0;

	if (irqchip_in_kernel(kvm)) {
		if (!msi2intx &&
		    (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
			free_irq(adev->host_irq, (void *)adev);
			pci_disable_msi(adev->dev);
		}

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if (airq->host_irq)
			adev->host_irq = airq->host_irq;
		else
			adev->host_irq = adev->dev->irq;

		/* Even though this is PCI, we don't want to use shared
		 * interrupts. Sharing host devices with guest-assigned devices
		 * on the same interrupt line is not a happy situation: there
		 * are going to be long delays in accepting, acking, etc.
		 */
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
				0, "kvm_assigned_intx_device", (void *)adev))
			return -EIO;
	}

	adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
				   KVM_ASSIGNED_DEV_HOST_INTX;
	return 0;
}
static int assigned_device_update_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	int r;

	adev->guest_irq = airq->guest_irq;
	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
		/* x86 doesn't care about the upper address of the guest
		 * MSI message address */
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->ack_notifier.gsi = -1;
	} else if (msi2intx) {
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->ack_notifier.gsi = airq->guest_irq;
	} else {
		/*
		 * The guest requires us to disable device MSI; we disable
		 * MSI and re-enable INTx by default again. Notice it's only
		 * for non-msi2intx.
		 */
		assigned_device_update_intx(kvm, adev, airq);
		return 0;
	}

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		return 0;

	if (irqchip_in_kernel(kvm)) {
		if (!msi2intx) {
			if (adev->irq_requested_type &
					KVM_ASSIGNED_DEV_HOST_INTX)
				free_irq(adev->host_irq, (void *)adev);

			r = pci_enable_msi(adev->dev);
			if (r)
				return r;
		}

		adev->host_irq = adev->dev->irq;
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
				"kvm_assigned_msi_device", (void *)adev))
			return -EIO;
	}

	if (!msi2intx)
		adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

	adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
	return 0;
}
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq
				   *assigned_irq)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	u32 current_flags = 0, changed_flags;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

	if (!match->irq_requested_type) {
		INIT_WORK(&match->interrupt_work,
				kvm_assigned_dev_interrupt_work_handler);
		if (irqchip_in_kernel(kvm)) {
			/* Register ack notifier */
			match->ack_notifier.gsi = -1;
			match->ack_notifier.irq_acked =
					kvm_assigned_dev_ack_irq;
			kvm_register_irq_ack_notifier(kvm,
					&match->ack_notifier);

			/* Request IRQ source ID */
			r = kvm_request_irq_source_id(kvm);
			if (r < 0)
				goto out_release;
			else
				match->irq_source_id = r;

			/* Determine the host device irq type; we can learn
			 * the result from dev->msi_enabled */
			if (msi2intx)
				pci_enable_msi(match->dev);
		}
	}

	if ((match->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) &&
		 (match->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI))
		current_flags |= KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;

	changed_flags = assigned_irq->flags ^ current_flags;

	if ((changed_flags & KVM_DEV_IRQ_ASSIGN_MSI_ACTION) ||
	    (msi2intx && match->dev->msi_enabled)) {
		r = assigned_device_update_msi(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"MSI device!\n");
			goto out_release;
		}
	} else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
		/* Host device IRQ 0 means don't support INTx */
		if (!msi2intx) {
			printk(KERN_WARNING
			       "kvm: wait device to enable MSI!\n");
			r = 0;
		} else {
			printk(KERN_WARNING
			       "kvm: failed to enable MSI device!\n");
			r = -ENOTTY;
			goto out_release;
		}
	} else {
		/* Non-sharing INTx mode */
		r = assigned_device_update_intx(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"INTx device!\n");
			goto out_release;
		}
	}

	mutex_unlock(&kvm->lock);
	return r;
out_release:
	mutex_unlock(&kvm->lock);
	kvm_free_assigned_device(kvm, match);
	return r;
}
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EINVAL;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	match->irq_source_id = -1;
	match->kvm = kvm;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		  "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif
#endif /* KVM_CAP_DEVICE_ASSIGNMENT */
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
		cpumask_clear(cpus);

	me = get_cpu();
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}
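/*
 * Hedged sketch, not compiled here: the request bit set by
 * make_all_cpus_request() is consumed on the arch side of the vcpu run
 * loop, roughly as below, so a vcpu that was not running simply picks the
 * request up on its next guest entry.  The tlb_flush hook name follows the
 * x86 code of this era and is illustrative only.
 */
#if 0
	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		kvm_x86_ops->tlb_flush(vcpu);
#endif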
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
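/*
 * Hedged sketch, not compiled here: mmu_notifier_seq and mmu_notifier_count
 * are meant to be consumed by the arch page fault path roughly as follows --
 * snapshot the sequence before resolving the pfn outside mmu_lock, then back
 * off if an invalidation ran in between.  The control flow is illustrative;
 * the field names mirror those used above.
 */
#if 0
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(kvm, gfn);		/* may sleep, no mmu_lock held */

	spin_lock(&kvm->mmu_lock);
	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq)
		goto retry;			/* raced with an invalidate */
	/* ... establish the spte ... */
	spin_unlock(&kvm->mmu_lock);
#endif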
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_LIST_HEAD(&kvm->irq_routing);
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	int largepages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and, to be
		 * safe, it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	if (!npages)
		*memslot = old;
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
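/*
 * Userspace-side sketch, not compiled here: this is how a VMM typically
 * reaches the function above, via the KVM_SET_USER_MEMORY_REGION ioctl on a
 * VM fd.  Error handling is omitted and "vm_fd" is assumed to come from
 * KVM_CREATE_VM.
 */
#if 0
	struct kvm_userspace_memory_region region;
	void *ram = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	memset(&region, 0, sizeof(region));
	region.slot            = 0;
	region.guest_phys_addr = 0;		/* page aligned, see sanity checks */
	region.memory_size     = 0x100000;	/* multiple of PAGE_SIZE */
	region.userspace_addr  = (unsigned long)ram;

	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
#endif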
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
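/*
 * Userspace-side sketch, not compiled here: fetching the bitmap that
 * kvm_get_dirty_log() copies out, one bit per page of the slot, rounded up
 * to a multiple of BITS_PER_LONG as above (64-bit longs assumed below).
 * "slot_pages" and "vm_fd" are assumed to exist; error handling is omitted.
 */
#if 0
	struct kvm_dirty_log log;
	unsigned long *bitmap = calloc((slot_pages + 63) / 64,
				       sizeof(unsigned long));

	memset(&log, 0, sizeof(log));
	log.slot = 0;
	log.dirty_bitmap = bitmap;

	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
#endif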
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
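/*
 * Worked example: with 4 KiB pages, a read of len = 5000 bytes at
 * gpa = 0x1ff0 starts at offset 0xff0 into gfn 1, so next_segment() splits
 * it into chunks of 16, 4096 and 888 bytes against gfns 1, 2 and 3; only
 * the first chunk carries a non-zero page offset.
 */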
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
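/*
 * Userspace-side sketch, not compiled here: creating a vcpu through the
 * ioctl above and mapping its kvm_run area, which kvm_vcpu_fault() serves
 * page by page.  "kvm_fd" and "vm_fd" are assumed to exist already; error
 * handling is omitted.
 */
#if 0
	int vcpu_fd   = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	long map_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	ioctl(vcpu_fd, KVM_RUN, 0);	/* run the guest until the next exit */
#endif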
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
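/*
 * Userspace-side sketch, not compiled here: the usual bring-up sequence that
 * lands in kvm_dev_ioctl_create_vm() -- open /dev/kvm, check the API
 * version, then ask for a VM fd.  Error handling is omitted.
 */
#if 0
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd;

	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		exit(1);
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
#endif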
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
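/*
 * Hedged sketch, not compiled here: a device hangs itself onto a bus by
 * filling in the kvm_io_device callbacks (see iodev.h) and registering,
 * e.g. during device setup.  The device type and the range constants are
 * made up for illustration.
 */
#if 0
	static int my_dev_in_range(struct kvm_io_device *this, gpa_t addr,
				   int len, int is_write)
	{
		return addr >= MY_DEV_BASE &&
		       addr + len <= MY_DEV_BASE + MY_DEV_LEN;
	}

	my_dev->dev.in_range = my_dev_in_range;
	my_dev->dev.read     = my_dev_read;
	my_dev->dev.write    = my_dev_write;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &my_dev->dev);
#endif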
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);